[crankshaft] Remove Crankshaft.

R=danno@chromium.org
BUG=v8:6408

Change-Id: I6613557e474f415293feb164a30c15485d81ff2c
Reviewed-on: https://chromium-review.googlesource.com/547717
Reviewed-by: Daniel Clifford <danno@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46212}
Authored by Michael Starzinger on 2017-06-26 11:10:31 +02:00; committed by Commit Bot
parent f030838700
commit c751e79ec3
134 changed files with 2 additions and 130380 deletions

BUILD.gn (114 lines changed)

@@ -1455,63 +1455,6 @@ v8_source_set("v8_base") {
"src/counters-inl.h",
"src/counters.cc",
"src/counters.h",
"src/crankshaft/compilation-phase.cc",
"src/crankshaft/compilation-phase.h",
"src/crankshaft/hydrogen-alias-analysis.h",
"src/crankshaft/hydrogen-bce.cc",
"src/crankshaft/hydrogen-bce.h",
"src/crankshaft/hydrogen-canonicalize.cc",
"src/crankshaft/hydrogen-canonicalize.h",
"src/crankshaft/hydrogen-check-elimination.cc",
"src/crankshaft/hydrogen-check-elimination.h",
"src/crankshaft/hydrogen-dce.cc",
"src/crankshaft/hydrogen-dce.h",
"src/crankshaft/hydrogen-dehoist.cc",
"src/crankshaft/hydrogen-dehoist.h",
"src/crankshaft/hydrogen-environment-liveness.cc",
"src/crankshaft/hydrogen-environment-liveness.h",
"src/crankshaft/hydrogen-escape-analysis.cc",
"src/crankshaft/hydrogen-escape-analysis.h",
"src/crankshaft/hydrogen-flow-engine.h",
"src/crankshaft/hydrogen-gvn.cc",
"src/crankshaft/hydrogen-gvn.h",
"src/crankshaft/hydrogen-infer-representation.cc",
"src/crankshaft/hydrogen-infer-representation.h",
"src/crankshaft/hydrogen-infer-types.cc",
"src/crankshaft/hydrogen-infer-types.h",
"src/crankshaft/hydrogen-instructions.cc",
"src/crankshaft/hydrogen-instructions.h",
"src/crankshaft/hydrogen-load-elimination.cc",
"src/crankshaft/hydrogen-load-elimination.h",
"src/crankshaft/hydrogen-mark-unreachable.cc",
"src/crankshaft/hydrogen-mark-unreachable.h",
"src/crankshaft/hydrogen-range-analysis.cc",
"src/crankshaft/hydrogen-range-analysis.h",
"src/crankshaft/hydrogen-redundant-phi.cc",
"src/crankshaft/hydrogen-redundant-phi.h",
"src/crankshaft/hydrogen-removable-simulates.cc",
"src/crankshaft/hydrogen-removable-simulates.h",
"src/crankshaft/hydrogen-representation-changes.cc",
"src/crankshaft/hydrogen-representation-changes.h",
"src/crankshaft/hydrogen-sce.cc",
"src/crankshaft/hydrogen-sce.h",
"src/crankshaft/hydrogen-store-elimination.cc",
"src/crankshaft/hydrogen-store-elimination.h",
"src/crankshaft/hydrogen-types.cc",
"src/crankshaft/hydrogen-types.h",
"src/crankshaft/hydrogen-uint32-analysis.cc",
"src/crankshaft/hydrogen-uint32-analysis.h",
"src/crankshaft/hydrogen.cc",
"src/crankshaft/hydrogen.h",
"src/crankshaft/lithium-allocator-inl.h",
"src/crankshaft/lithium-allocator.cc",
"src/crankshaft/lithium-allocator.h",
"src/crankshaft/lithium-codegen.cc",
"src/crankshaft/lithium-codegen.h",
"src/crankshaft/lithium-inl.h",
"src/crankshaft/lithium.cc",
"src/crankshaft/lithium.h",
"src/crankshaft/unique.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
@@ -2066,12 +2009,6 @@ v8_source_set("v8_base") {
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.h",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.cc",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.h",
"src/crankshaft/ia32/lithium-ia32.cc",
"src/crankshaft/ia32/lithium-ia32.h",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
@@ -2106,12 +2043,6 @@ v8_source_set("v8_base") {
"src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.h",
"src/crankshaft/x64/lithium-codegen-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.h",
"src/crankshaft/x64/lithium-gap-resolver-x64.cc",
"src/crankshaft/x64/lithium-gap-resolver-x64.h",
"src/crankshaft/x64/lithium-x64.cc",
"src/crankshaft/x64/lithium-x64.h",
"src/debug/x64/debug-x64.cc",
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
@@ -2172,12 +2103,6 @@ v8_source_set("v8_base") {
"src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.h",
"src/crankshaft/arm/lithium-arm.cc",
"src/crankshaft/arm/lithium-arm.h",
"src/crankshaft/arm/lithium-codegen-arm.cc",
"src/crankshaft/arm/lithium-codegen-arm.h",
"src/crankshaft/arm/lithium-gap-resolver-arm.cc",
"src/crankshaft/arm/lithium-gap-resolver-arm.h",
"src/debug/arm/debug-arm.cc",
"src/full-codegen/arm/full-codegen-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
@@ -2226,15 +2151,6 @@ v8_source_set("v8_base") {
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.h",
"src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
"src/crankshaft/arm64/lithium-arm64.cc",
"src/crankshaft/arm64/lithium-arm64.h",
"src/crankshaft/arm64/lithium-codegen-arm64.cc",
"src/crankshaft/arm64/lithium-codegen-arm64.h",
"src/crankshaft/arm64/lithium-gap-resolver-arm64.cc",
"src/crankshaft/arm64/lithium-gap-resolver-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/full-codegen/arm64/full-codegen-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
@@ -2249,12 +2165,6 @@ v8_source_set("v8_base") {
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.h",
"src/crankshaft/mips/lithium-gap-resolver-mips.cc",
"src/crankshaft/mips/lithium-gap-resolver-mips.h",
"src/crankshaft/mips/lithium-mips.cc",
"src/crankshaft/mips/lithium-mips.h",
"src/debug/mips/debug-mips.cc",
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
@@ -2288,12 +2198,6 @@ v8_source_set("v8_base") {
"src/compiler/mips64/instruction-codes-mips64.h",
"src/compiler/mips64/instruction-scheduler-mips64.cc",
"src/compiler/mips64/instruction-selector-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.h",
"src/crankshaft/mips64/lithium-gap-resolver-mips64.cc",
"src/crankshaft/mips64/lithium-gap-resolver-mips64.h",
"src/crankshaft/mips64/lithium-mips64.cc",
"src/crankshaft/mips64/lithium-mips64.h",
"src/debug/mips64/debug-mips64.cc",
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
@@ -2327,12 +2231,6 @@ v8_source_set("v8_base") {
"src/compiler/ppc/instruction-codes-ppc.h",
"src/compiler/ppc/instruction-scheduler-ppc.cc",
"src/compiler/ppc/instruction-selector-ppc.cc",
"src/crankshaft/ppc/lithium-codegen-ppc.cc",
"src/crankshaft/ppc/lithium-codegen-ppc.h",
"src/crankshaft/ppc/lithium-gap-resolver-ppc.cc",
"src/crankshaft/ppc/lithium-gap-resolver-ppc.h",
"src/crankshaft/ppc/lithium-ppc.cc",
"src/crankshaft/ppc/lithium-ppc.h",
"src/debug/ppc/debug-ppc.cc",
"src/full-codegen/ppc/full-codegen-ppc.cc",
"src/ic/ppc/access-compiler-ppc.cc",
@@ -2366,12 +2264,6 @@ v8_source_set("v8_base") {
"src/compiler/s390/instruction-codes-s390.h",
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/crankshaft/s390/lithium-codegen-s390.cc",
"src/crankshaft/s390/lithium-codegen-s390.h",
"src/crankshaft/s390/lithium-gap-resolver-s390.cc",
"src/crankshaft/s390/lithium-gap-resolver-s390.h",
"src/crankshaft/s390/lithium-s390.cc",
"src/crankshaft/s390/lithium-s390.h",
"src/debug/s390/debug-s390.cc",
"src/full-codegen/s390/full-codegen-s390.cc",
"src/ic/s390/access-compiler-s390.cc",
@@ -2405,12 +2297,6 @@ v8_source_set("v8_base") {
"src/compiler/x87/instruction-codes-x87.h",
"src/compiler/x87/instruction-scheduler-x87.cc",
"src/compiler/x87/instruction-selector-x87.cc",
"src/crankshaft/x87/lithium-codegen-x87.cc",
"src/crankshaft/x87/lithium-codegen-x87.h",
"src/crankshaft/x87/lithium-gap-resolver-x87.cc",
"src/crankshaft/x87/lithium-gap-resolver-x87.h",
"src/crankshaft/x87/lithium-x87.cc",
"src/crankshaft/x87/lithium-x87.h",
"src/debug/x87/debug-x87.cc",
"src/full-codegen/x87/full-codegen-x87.cc",
"src/ic/x87/access-compiler-x87.cc",

src/compiler.cc

@@ -16,10 +16,10 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
@@ -31,6 +31,7 @@
#include "src/log-inl.h"
#include "src/messages.h"
#include "src/objects/map.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
@@ -203,11 +204,6 @@ void CompilationJob::RecordOptimizedCompilationStats() const {
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
compiled_functions, code_size, compilation_time);
}
if (FLAG_hydrogen_stats) {
isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
time_taken_to_execute_,
time_taken_to_finalize_);
}
}
Isolate* CompilationJob::isolate() const { return info()->isolate(); }
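For context on the block removed above: when FLAG_hydrogen_stats was set, each finished job folded its prepare/execute/finalize times into running subtotals via IncrementSubtotals(). A minimal, hypothetical sketch of that accumulation pattern, using std::chrono and made-up names rather than V8's internal timer and statistics types:

```cpp
#include <chrono>
#include <cstdio>

// Hypothetical accumulator mirroring the shape of the removed
// GetHStatistics()->IncrementSubtotals(...) call: each finished job adds its
// prepare/execute/finalize times to running totals that are printed later.
struct PhaseStatistics {
  std::chrono::microseconds prepare{0};
  std::chrono::microseconds execute{0};
  std::chrono::microseconds finalize{0};

  void IncrementSubtotals(std::chrono::microseconds time_taken_to_prepare,
                          std::chrono::microseconds time_taken_to_execute,
                          std::chrono::microseconds time_taken_to_finalize) {
    prepare += time_taken_to_prepare;
    execute += time_taken_to_execute;
    finalize += time_taken_to_finalize;
  }

  void Print() const {
    std::printf("prepare=%lldus execute=%lldus finalize=%lldus\n",
                static_cast<long long>(prepare.count()),
                static_cast<long long>(execute.count()),
                static_cast<long long>(finalize.count()));
  }
};

int main() {
  PhaseStatistics stats;
  // One optimized compilation job's phase times (made-up numbers).
  stats.IncrementSubtotals(std::chrono::microseconds(120),
                           std::chrono::microseconds(900),
                           std::chrono::microseconds(60));
  stats.Print();
  return 0;
}
```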


@@ -1,9 +0,0 @@
set noparent
bmeurer@chromium.org
danno@chromium.org
jarin@chromium.org
jkummerow@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Compiler


@@ -1 +0,0 @@
rmcilroy@chromium.org

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

src/crankshaft/arm/lithium-codegen-arm.h

@@ -1,386 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
#include "src/ast/scopes.h"
#include "src/crankshaft/arm/lithium-arm.h"
#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
LinkRegisterStatus GetLinkRegisterState() const {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch);
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
Register object,
Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
MemOperand PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
int constant_key,
int element_size,
int shift_size,
int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
Scope* scope() const { return scope_; }
Register scratch0() { return r9; }
LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true, Label* if_false,
Handle<String> class_name, Register input,
Register temporary, Register temporary2);
bool HasAllocatedStackSlots() const {
return chunk()->HasAllocatedStackSlots();
}
int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
int GetTotalFrameSlotCount() const {
return chunk()->GetTotalFrameSlotCount();
}
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode);
void CallCode(
Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallCodeGeneric(
Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
MemOperand BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding);
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition condition);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
DwVfpRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
Label* is_not_string,
SmiCheck check_needed);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset,
AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt(int space_needed) override;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
DCHECK(codegen_->info()->is_calling());
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
codegen_->masm_->PushSafepointRegisters();
}
~PushSafepointRegistersScope() {
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
codegen_->masm_->PopSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
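The DECLARE_DO / LITHIUM_CONCRETE_INSTRUCTION_LIST pairing in the header above is a standard C preprocessor X-macro: a single list macro stamps out one Do<Type>() visitor per Lithium instruction. A small self-contained sketch of the pattern, with a made-up three-entry instruction list instead of V8's real one:

```cpp
#include <cstdio>

// Made-up instruction list; V8's real LITHIUM_CONCRETE_INSTRUCTION_LIST is far
// longer, but the expansion mechanism is the same.
#define INSTRUCTION_LIST(V) \
  V(Add)                    \
  V(Branch)                 \
  V(Return)

// One empty placeholder class per instruction (stands in for LAdd, LBranch, ...).
#define DECLARE_CLASS(type) struct L##type {};
INSTRUCTION_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS

struct CodeGen {
  // Same trick as DECLARE_DO above: the list macro stamps out one visitor
  // method per instruction type.
#define DECLARE_DO(type) \
  void Do##type(L##type*) { std::puts("Do" #type); }
  INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
};

int main() {
  CodeGen gen;
  LAdd add;
  gen.DoAdd(&add);  // prints "DoAdd"
  return 0;
}
```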

src/crankshaft/arm/lithium-gap-resolver-arm.cc

@@ -1,303 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
#include "src/assembler-inl.h"
#include "src/crankshaft/arm/lithium-codegen-arm.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list and using
// the root register has two advantages:
// - It is not in the crankshaft allocatable registers list, so it can't interfere
// with any of the moves we are resolving.
// - We don't need to push it on the stack, as we can reload it with its value
// once we have resolved a cycle.
#define kSavedValueRegister kRootRegister
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL), need_to_restore_root_(false) { }
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
if (need_to_restore_root_) {
DCHECK(kSavedValueRegister.is(kRootRegister));
__ InitializeRootRegister();
need_to_restore_root_ = false;
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle, when doing a depth-first traversal of moves,
by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
void LGapResolver::BreakCycle(int index) {
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
need_to_restore_root_ = true;
__ mov(kSavedValueRegister, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
need_to_restore_root_ = true;
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ ldr(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (!destination_operand.OffsetIsUint12Encodable()) {
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
__ vstr(kScratchDoubleReg.low(), destination_operand);
} else {
__ ldr(ip, source_operand);
__ str(ip, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Vmov(result, v, ip);
} else {
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
need_to_restore_root_ = true;
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
}
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ vstr(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kScratchDoubleReg was used to break the cycle.
__ vpush(kScratchDoubleReg);
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
__ vpop(kScratchDoubleReg);
} else {
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} // namespace internal
} // namespace v8
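The gap resolver removed above solves the parallel-move problem: emit register moves in an order that never clobbers a source that is still needed, and break cycles (such as swaps) by spilling one source to a scratch location, the role played by the root register in the ARM code above. A simplified, self-contained sketch of that idea, modelling registers as plain ints; it only illustrates the algorithm's shape, assumes each destination appears in at most one move, and is not V8's implementation:

```cpp
#include <cstdio>
#include <vector>

// Registers are modelled as plain ints. Each {src, dst} pair is one parallel
// move; destinations are assumed to be unique, as in LGapResolver's input.
struct Move {
  int src;
  int dst;
};

// Emit the moves in an order that never clobbers a still-needed source. When
// only cycles remain (e.g. a swap), spill one source into a scratch slot and
// let the corresponding move read from the scratch instead.
void ResolveParallelMoves(std::vector<Move> moves, std::vector<int>& regs) {
  const int kScratchSrc = -1;
  int scratch = 0;
  std::vector<bool> done(moves.size(), false);
  size_t remaining = moves.size();
  while (remaining > 0) {
    bool progressed = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      if (done[i]) continue;
      bool blocked = false;  // Does an unperformed move still read our dst?
      for (size_t j = 0; j < moves.size(); ++j) {
        if (j != i && !done[j] && moves[j].src == moves[i].dst) blocked = true;
      }
      if (blocked) continue;
      regs[moves[i].dst] =
          (moves[i].src == kScratchSrc) ? scratch : regs[moves[i].src];
      done[i] = true;
      --remaining;
      progressed = true;
    }
    if (!progressed) {
      // Only cycles are left: break one by saving a source and redirecting
      // its move to read from the scratch slot instead.
      for (size_t i = 0; i < moves.size(); ++i) {
        if (!done[i]) {
          scratch = regs[moves[i].src];
          moves[i].src = kScratchSrc;
          break;
        }
      }
    }
  }
}

int main() {
  std::vector<int> regs = {10, 20, 30};  // r0, r1, r2
  // Parallel semantics: r1 <- r0, r0 <- r1 (a swap), r2 <- r1.
  ResolveParallelMoves({{0, 1}, {1, 0}, {1, 2}}, regs);
  std::printf("r0=%d r1=%d r2=%d\n", regs[0], regs[1], regs[2]);  // 20 10 20
  return 0;
}
```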

src/crankshaft/arm/lithium-gap-resolver-arm.h

@@ -1,63 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver final BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
// We use the root register as a scratch in a few places. When that happens,
// this flag is set to indicate that it needs to be restored.
bool need_to_restore_root_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_


@@ -1 +0,0 @@
rmcilroy@chromium.org

src/crankshaft/arm64/delayed-masm-arm64-inl.h

@@ -1,71 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/crankshaft/arm64/delayed-masm-arm64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
DelayedMasm::DelayedMasm(LCodeGen* owner, MacroAssembler* masm,
const Register& scratch_register)
: cgen_(owner),
masm_(masm),
scratch_register_(scratch_register),
scratch_register_used_(false),
pending_(kNone),
saved_value_(0) {
#ifdef DEBUG
pending_register_ = no_reg;
pending_value_ = 0;
pending_pc_ = 0;
scratch_register_acquired_ = false;
#endif
}
void DelayedMasm::EndDelayedUse() {
EmitPending();
DCHECK(!scratch_register_acquired_);
ResetSavedValue();
}
void DelayedMasm::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
EmitPending();
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Mov(rd, operand, discard_mode);
}
void DelayedMasm::Fmov(VRegister fd, VRegister fn) {
EmitPending();
__ Fmov(fd, fn);
}
void DelayedMasm::Fmov(VRegister fd, double imm) {
EmitPending();
__ Fmov(fd, imm);
}
void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
EmitPending();
DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
__ LoadObject(result, object);
}
void DelayedMasm::InitializeRootRegister() { masm_->InitializeRootRegister(); }
#undef __
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_

src/crankshaft/arm64/delayed-masm-arm64.cc

@@ -1,199 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_ARM64
#include "src/crankshaft/arm64/delayed-masm-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
DCHECK((src->IsStackSlot() && dst->IsStackSlot()) ||
(src->IsDoubleStackSlot() && dst->IsDoubleStackSlot()));
MemOperand src_operand = cgen_->ToMemOperand(src);
MemOperand dst_operand = cgen_->ToMemOperand(dst);
if (pending_ == kStackSlotMove) {
DCHECK(pending_pc_ == masm_->pc_offset());
UseScratchRegisterScope scope(masm_);
DoubleRegister temp1 = scope.AcquireD();
DoubleRegister temp2 = scope.AcquireD();
switch (MemOperand::AreConsistentForPair(pending_address_src_,
src_operand)) {
case MemOperand::kNotPair:
__ Ldr(temp1, pending_address_src_);
__ Ldr(temp2, src_operand);
break;
case MemOperand::kPairAB:
__ Ldp(temp1, temp2, pending_address_src_);
break;
case MemOperand::kPairBA:
__ Ldp(temp2, temp1, src_operand);
break;
}
switch (MemOperand::AreConsistentForPair(pending_address_dst_,
dst_operand)) {
case MemOperand::kNotPair:
__ Str(temp1, pending_address_dst_);
__ Str(temp2, dst_operand);
break;
case MemOperand::kPairAB:
__ Stp(temp1, temp2, pending_address_dst_);
break;
case MemOperand::kPairBA:
__ Stp(temp2, temp1, dst_operand);
break;
}
ResetPending();
return;
}
EmitPending();
pending_ = kStackSlotMove;
pending_address_src_ = src_operand;
pending_address_dst_ = dst_operand;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
DCHECK(!scratch_register_acquired_);
if ((pending_ == kStoreConstant) && (value == pending_value_)) {
MemOperand::PairResult result =
MemOperand::AreConsistentForPair(pending_address_dst_, operand);
if (result != MemOperand::kNotPair) {
const MemOperand& dst =
(result == MemOperand::kPairAB) ?
pending_address_dst_ :
operand;
DCHECK(pending_pc_ == masm_->pc_offset());
if (pending_value_ == 0) {
__ Stp(xzr, xzr, dst);
} else {
SetSavedValue(pending_value_);
__ Stp(ScratchRegister(), ScratchRegister(), dst);
}
ResetPending();
return;
}
}
EmitPending();
pending_ = kStoreConstant;
pending_address_dst_ = operand;
pending_value_ = value;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
if ((pending_ == kLoad) &&
pending_register_.IsSameSizeAndType(rd)) {
switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) {
case MemOperand::kNotPair:
break;
case MemOperand::kPairAB:
DCHECK(pending_pc_ == masm_->pc_offset());
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Ldp(pending_register_, rd, pending_address_src_);
ResetPending();
return;
case MemOperand::kPairBA:
DCHECK(pending_pc_ == masm_->pc_offset());
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Ldp(rd, pending_register_, operand);
ResetPending();
return;
}
}
EmitPending();
pending_ = kLoad;
pending_register_ = rd;
pending_address_src_ = operand;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
if ((pending_ == kStore) &&
pending_register_.IsSameSizeAndType(rd)) {
switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) {
case MemOperand::kNotPair:
break;
case MemOperand::kPairAB:
DCHECK(pending_pc_ == masm_->pc_offset());
__ Stp(pending_register_, rd, pending_address_dst_);
ResetPending();
return;
case MemOperand::kPairBA:
DCHECK(pending_pc_ == masm_->pc_offset());
__ Stp(rd, pending_register_, operand);
ResetPending();
return;
}
}
EmitPending();
pending_ = kStore;
pending_register_ = rd;
pending_address_dst_ = operand;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::EmitPending() {
DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
switch (pending_) {
case kNone:
return;
case kStoreConstant:
if (pending_value_ == 0) {
__ Str(xzr, pending_address_dst_);
} else {
SetSavedValue(pending_value_);
__ Str(ScratchRegister(), pending_address_dst_);
}
break;
case kLoad:
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
__ Ldr(pending_register_, pending_address_src_);
break;
case kStore:
__ Str(pending_register_, pending_address_dst_);
break;
case kStackSlotMove: {
UseScratchRegisterScope scope(masm_);
DoubleRegister temp = scope.AcquireD();
__ Ldr(temp, pending_address_src_);
__ Str(temp, pending_address_dst_);
break;
}
}
ResetPending();
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

src/crankshaft/arm64/delayed-masm-arm64.h

@@ -1,154 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
// This class delays the generation of some instructions. This way, we have a
// chance to merge two instructions in one (with load/store pair).
// Each instruction must either:
// - merge with the pending instruction and generate just one instruction.
// - emit the pending instruction and then generate the instruction (or set the
// pending instruction).
class DelayedMasm BASE_EMBEDDED {
public:
inline DelayedMasm(LCodeGen* owner, MacroAssembler* masm,
const Register& scratch_register);
~DelayedMasm() {
DCHECK(!scratch_register_acquired_);
DCHECK(!scratch_register_used_);
DCHECK(!pending());
}
inline void EndDelayedUse();
const Register& ScratchRegister() {
scratch_register_used_ = true;
return scratch_register_;
}
bool IsScratchRegister(const CPURegister& reg) {
return reg.Is(scratch_register_);
}
bool scratch_register_used() const { return scratch_register_used_; }
void reset_scratch_register_used() { scratch_register_used_ = false; }
// Acquire/Release scratch register for use outside this class.
void AcquireScratchRegister() {
EmitPending();
ResetSavedValue();
#ifdef DEBUG
DCHECK(!scratch_register_acquired_);
scratch_register_acquired_ = true;
#endif
}
void ReleaseScratchRegister() {
#ifdef DEBUG
DCHECK(scratch_register_acquired_);
scratch_register_acquired_ = false;
#endif
}
bool pending() { return pending_ != kNone; }
// Extra layer over the macro-assembler instructions (which emits the
// potential pending instruction).
inline void Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, double imm);
inline void LoadObject(Register result, Handle<Object> object);
// Instructions which try to merge with the pending instructions.
void StackSlotMove(LOperand* src, LOperand* dst);
// StoreConstant can only be used if the scratch register is not acquired.
void StoreConstant(uint64_t value, const MemOperand& operand);
void Load(const CPURegister& rd, const MemOperand& operand);
void Store(const CPURegister& rd, const MemOperand& operand);
// Emit the potential pending instruction.
void EmitPending();
// Reset the pending state.
void ResetPending() {
pending_ = kNone;
#ifdef DEBUG
pending_register_ = no_reg;
MemOperand tmp;
pending_address_src_ = tmp;
pending_address_dst_ = tmp;
pending_value_ = 0;
pending_pc_ = 0;
#endif
}
inline void InitializeRootRegister();
private:
// Set the saved value and load the ScratchRegister with it.
void SetSavedValue(uint64_t saved_value) {
DCHECK(saved_value != 0);
if (saved_value_ != saved_value) {
masm_->Mov(ScratchRegister(), saved_value);
saved_value_ = saved_value;
}
}
// Reset the saved value (i.e. the value of ScratchRegister is no longer
// known).
void ResetSavedValue() {
saved_value_ = 0;
}
LCodeGen* cgen_;
MacroAssembler* masm_;
// Register used to store a constant.
Register scratch_register_;
bool scratch_register_used_;
// Sometimes we store or load two values in two contiguous stack slots.
// In this case, we try to use the ldp/stp instructions to reduce code size.
// To be able to do that, instead of generating directly the instructions,
// we register with the following fields that an instruction needs to be
// generated. Then with the next instruction, if the instruction is
// consistent with the pending one for stp/ldp we generate ldp/stp. Else,
// if they are not consistent, we generate the pending instruction and we
// register the new instruction (which becomes pending).
// Enumeration of instructions which can be pending.
enum Pending {
kNone,
kStoreConstant,
kLoad, kStore,
kStackSlotMove
};
// The pending instruction.
Pending pending_;
// For kLoad, kStore: register which must be loaded/stored.
CPURegister pending_register_;
// For kLoad, kStackSlotMove: address of the load.
MemOperand pending_address_src_;
// For kStoreConstant, kStore, kStackSlotMove: address of the store.
MemOperand pending_address_dst_;
// For kStoreConstant: value to be stored.
uint64_t pending_value_;
// Value held into the ScratchRegister if the saved_value_ is not 0.
// For 0, we use xzr.
uint64_t saved_value_;
#ifdef DEBUG
// Address where the pending instruction must be generated. It's only used to
// check that nothing else has been generated since we set the pending
// instruction.
int pending_pc_;
// If true, the scratch register has been acquired outside this class. The
// scratch register can no longer be used for constants.
bool scratch_register_acquired_;
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
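DelayedMasm, declared above, buffers one pending memory instruction so that an adjacent follow-up can be merged into a single ldp/stp pair instead of two single accesses. A minimal sketch of that delay-and-pair idea for stores only, emitting textual pseudo-assembly rather than driving a real assembler; the class name, slot size, and output format are illustrative, not V8's:

```cpp
#include <cstdio>
#include <optional>
#include <string>

// Holds at most one pending store; if the next store targets the adjacent
// stack slot, the two are emitted as a single paired store, otherwise the
// pending one is flushed first. Output is textual pseudo-assembly.
class DelayedStores {
 public:
  void Store(const std::string& reg, int offset) {
    if (pending_ && pending_->offset + kSlotSize == offset) {
      std::printf("stp %s, %s, [sp, #%d]\n", pending_->reg.c_str(),
                  reg.c_str(), pending_->offset);
      pending_.reset();
      return;
    }
    EmitPending();
    pending_ = Pending{reg, offset};
  }

  // Flush the pending store as a plain single store.
  void EmitPending() {
    if (pending_) {
      std::printf("str %s, [sp, #%d]\n", pending_->reg.c_str(),
                  pending_->offset);
      pending_.reset();
    }
  }

 private:
  static constexpr int kSlotSize = 8;
  struct Pending {
    std::string reg;
    int offset;
  };
  std::optional<Pending> pending_;
};

int main() {
  DelayedStores masm;
  masm.Store("x0", 0);
  masm.Store("x1", 8);   // adjacent to the pending store -> one stp
  masm.Store("x2", 32);  // nothing pending to pair with; flushed below
  masm.EmitPending();
  return 0;
}
```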

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

src/crankshaft/arm64/lithium-codegen-arm64.h

@@ -1,441 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
#include "src/crankshaft/arm64/lithium-arm64.h"
#include "src/ast/scopes.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple),
pushed_arguments_(0) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
Scope* scope() const { return scope_; }
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
LinkRegisterStatus GetLinkRegisterState() const {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32(LOperand* op);
enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI>
Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
int JSShiftAmountFromLConstant(LOperand* constant) {
return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
}
// TODO(jbramley): Examine these helpers and check that they make sense.
// IsInteger32Constant returns true for smi constants, for example.
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
DoubleRegister ToDoubleRegister(LOperand* op) const;
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
// Return a double scratch register which can be used locally
// when generating code for a lithium instruction.
DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
Label* exit,
Label* allocation_entry);
void DoDeferredNumberTagU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredTaggedToI(LTaggedToI* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
Register object,
Register index);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void DoGap(LGap* instr);
// Generic version of EmitBranch. It contains some code to avoid emitting a
// branch on the next emitted basic block where we could just fall through.
// You shouldn't use it directly but rather consider one of the helpers like
// LCodeGen::EmitBranch, LCodeGen::EmitCompareAndBranch...
template<class InstrType>
void EmitBranchGeneric(InstrType instr,
const BranchGenerator& branch);
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitCompareAndBranch(InstrType instr,
Condition condition,
const Register& lhs,
const Operand& rhs);
template<class InstrType>
void EmitTestAndBranch(InstrType instr,
Condition condition,
const Register& value,
uint64_t mask);
template <class InstrType>
void EmitBranchIfNonZeroNumber(InstrType instr, const VRegister& value,
const VRegister& scratch);
template<class InstrType>
void EmitBranchIfHeapNumber(InstrType instr,
const Register& value);
template<class InstrType>
void EmitBranchIfRoot(InstrType instr,
const Register& value,
Heap::RootListIndex index);
// Emits optimized code to deep-copy the contents of statically known object
// graphs (e.g. object literal boilerplate). Expects a pointer to the
// allocated destination object in the result register, and a pointer to the
// source object in the source register.
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
Register scratch,
int* offset,
AllocationSiteMode mode);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
MemOperand BuildSeqStringOperand(Register string,
Register temp,
LOperand* index,
String::Encoding encoding);
void DeoptimizeBranch(LInstruction* instr, DeoptimizeReason deopt_reason,
BranchType branch_type, Register reg = NoReg,
int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void DeoptimizeIf(Condition cond, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfZero(Register rt, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfSmi(Register rt, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr, DeoptimizeReason deopt_reason);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr, DeoptimizeReason deopt_reason);
void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
DeoptimizeReason deopt_reason);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
DeoptimizeReason deopt_reason);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
Register scratch,
bool key_is_smi,
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
int base_offset);
MemOperand PrepareKeyedArrayOperand(Register base,
Register elements,
Register key,
bool key_is_tagged,
ElementsKind elements_kind,
Representation representation,
int base_offset);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
bool HasAllocatedStackSlots() const {
return chunk()->HasAllocatedStackSlots();
}
int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
int GetTotalFrameSlotCount() const {
return chunk()->GetTotalFrameSlotCount();
}
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in x1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
// Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) override;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table itself is
// emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
// The number of arguments pushed onto the stack, either by this block or by a
// predecessor.
int pushed_arguments_;
void RecordPushedArgumentsDelta(int delta) {
pushed_arguments_ += delta;
DCHECK(pushed_arguments_ >= 0);
}
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen);
~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition is verified.
//
// For actual examples of condition see the concrete implementation in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
public:
explicit BranchGenerator(LCodeGen* codegen)
: codegen_(codegen) { }
virtual ~BranchGenerator() { }
virtual void Emit(Label* label) const = 0;
virtual void EmitInverted(Label* label) const = 0;
protected:
MacroAssembler* masm() const { return codegen_->masm(); }
LCodeGen* codegen_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
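
For readers unfamiliar with this pattern, here is a minimal sketch of a concrete generator in the spirit of BranchOnCondition from lithium-codegen-arm64.cc. The B() overload taking a condition and the NegateCondition() helper are assumed from the arm64 macro-assembler, so treat this as an illustration rather than the shipped code.

// Sketch only: branches to |label| when |cond_| holds (Emit) or when it does
// not hold (EmitInverted).
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen), cond_(cond) {}

  void Emit(Label* label) const override { masm()->B(cond_, label); }

  void EmitInverted(Label* label) const override {
    // "al" (always) cannot be negated; an unconditional generator simply
    // emits nothing for the inverted branch.
    if (cond_ != al) masm()->B(NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
};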


@ -1,306 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/arm64/delayed-masm-arm64-inl.h"
#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM((&masm_))
DelayedGapMasm::DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm)
: DelayedMasm(owner, masm, root) {
// We use the root register as an extra scratch register.
// The root register has two advantages:
//  - It is not in the Crankshaft allocatable registers list, so it can't
// interfere with the allocatable registers.
// - We don't need to push it on the stack, as we can reload it with its
//    value once we have finished.
}
DelayedGapMasm::~DelayedGapMasm() {}
void DelayedGapMasm::EndDelayedUse() {
DelayedMasm::EndDelayedUse();
if (scratch_register_used()) {
DCHECK(ScratchRegister().Is(root));
DCHECK(!pending());
InitializeRootRegister();
reset_scratch_register_used();
}
}
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
root_index_(0), in_cycle_(false), saved_destination_(NULL) {
}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
DCHECK(moves_.is_empty());
DCHECK(!masm_.pending());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found when we reach this move again.
PerformMove(i);
if (in_cycle_) RestoreValue();
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
if (!move.IsEliminated()) {
DCHECK(move.source()->IsConstantOperand());
EmitMove(i);
}
}
__ EndDelayedUse();
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
LMoveOperands& current_move = moves_[index];
DCHECK(!current_move.IsPending());
DCHECK(!current_move.IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
DCHECK(current_move.source() != NULL); // Otherwise it will look eliminated.
LOperand* destination = current_move.destination();
current_move.set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
current_move.set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
void LGapResolver::BreakCycle(int index) {
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
AcquireSavedValueRegister();
__ Mov(SavedValueRegister(), cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
AcquireSavedValueRegister();
__ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// Mark this move as resolved.
// This move will be actually performed by moving the saved value to this
// move's destination in LGapResolver::RestoreValue().
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
__ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
ReleaseSavedValueRegister();
} else if (saved_destination_->IsStackSlot()) {
__ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
ReleaseSavedValueRegister();
} else if (saved_destination_->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(saved_destination_),
SavedFPValueRegister());
} else if (saved_destination_->IsDoubleStackSlot()) {
__ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ Mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ Store(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ Load(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
EmitStackSlotMove(index);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsSmi(constant_source)) {
__ Mov(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(dst, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
__ Fmov(result, cgen_->ToDouble(constant_source));
} else {
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
Smi* smi = cgen_->ToSmi(constant_source);
__ StoreConstant(reinterpret_cast<intptr_t>(smi),
cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ StoreConstant(cgen_->ToInteger32(constant_source),
cgen_->ToMemOperand(destination));
} else {
Handle<Object> handle = cgen_->ToHandle(constant_source);
AllowDeferredHandleDereference smi_object_check;
if (handle->IsSmi()) {
Object* obj = *handle;
DCHECK(!obj->IsHeapObject());
__ StoreConstant(reinterpret_cast<intptr_t>(obj),
cgen_->ToMemOperand(destination));
} else {
AcquireSavedValueRegister();
__ LoadObject(SavedValueRegister(), handle);
__ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
ReleaseSavedValueRegister();
}
}
}
} else if (source->IsDoubleRegister()) {
DoubleRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ Store(src, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand src = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ Load(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
EmitStackSlotMove(index);
}
} else {
UNREACHABLE();
}
// The move has been emitted, we can eliminate it.
moves_[index].Eliminate();
}
} // namespace internal
} // namespace v8
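
The cycle handling in Resolve(), PerformMove(), BreakCycle() and RestoreValue() can be illustrated with a self-contained toy resolver (plain C++, not V8 code) that performs parallel moves over an array of integer registers. The single scratch slot stands in for SavedValueRegister(), constants and stack slots are omitted, and the unique-destination invariant enforced by Verify() is assumed.

#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// One pending parallel move between two registers of a toy register file.
struct ToyMove {
  ToyMove(int s, int d) : src(s), dst(d) {}
  int src;
  int dst;
  bool eliminated = false;
  bool pending = false;
};

class ToyGapResolver {
 public:
  explicit ToyGapResolver(std::vector<int>* regs) : regs_(regs) {}

  // Resolve a set of parallel moves, assuming every register is the
  // destination of at most one move (the Verify() invariant).
  void Resolve(std::vector<ToyMove> moves) {
    moves_ = std::move(moves);
    for (int i = 0; i < static_cast<int>(moves_.size()); ++i) {
      if (moves_[i].eliminated) continue;
      root_ = i;  // Any cycle is found when we reach this move again.
      PerformMove(i);
      if (in_cycle_) RestoreValue();
    }
  }

 private:
  void PerformMove(int index) {
    ToyMove& current = moves_[index];
    assert(!current.pending);
    current.pending = true;
    // Depth-first: moves whose source is our destination must run first,
    // otherwise emitting this move would clobber their input.
    for (int i = 0; i < static_cast<int>(moves_.size()); ++i) {
      ToyMove& other = moves_[i];
      if (!other.eliminated && !other.pending && other.src == current.dst) {
        PerformMove(i);
      }
    }
    current.pending = false;
    // If the root move (still in progress) reads our destination, we have
    // closed a cycle: spill our source to the scratch slot (BreakCycle).
    ToyMove& root = moves_[root_];
    if (!root.eliminated && root.pending && root.src == current.dst) {
      in_cycle_ = true;
      scratch_ = (*regs_)[current.src];
      saved_destination_ = current.dst;
      current.eliminated = true;
      return;
    }
    (*regs_)[current.dst] = (*regs_)[current.src];  // EmitMove
    current.eliminated = true;
  }

  // Complete the broken cycle by moving the spilled value to its destination.
  void RestoreValue() {
    (*regs_)[saved_destination_] = scratch_;
    in_cycle_ = false;
  }

  std::vector<ToyMove> moves_;
  std::vector<int>* regs_;
  int root_ = 0;
  int saved_destination_ = 0;
  int scratch_ = 0;
  bool in_cycle_ = false;
};

int main() {
  // Swap r0 <-> r1 and rotate r2 -> r3 -> r4 -> r2, all as one parallel move.
  std::vector<int> regs = {10, 11, 20, 30, 40};
  ToyGapResolver resolver(&regs);
  resolver.Resolve({{0, 1}, {1, 0}, {2, 3}, {3, 4}, {4, 2}});
  std::printf("%d %d %d %d %d\n", regs[0], regs[1], regs[2], regs[3],
              regs[4]);  // prints: 11 10 40 20 30
  return 0;
}

Resolving the swap {r0->r1, r1->r0}, for example, first spills r1 into the scratch slot, then performs r1 = r0, and only afterwards writes the spilled value into r0, which is exactly the BreakCycle()/RestoreValue() sequence above.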


@ -1,94 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#include "src/crankshaft/arm64/delayed-masm-arm64.h"
#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class DelayedGapMasm : public DelayedMasm {
public:
DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm);
~DelayedGapMasm();
void EndDelayedUse();
};
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Emit a move from one stack slot to another.
void EmitStackSlotMove(int index) {
masm_.StackSlotMove(moves_[index].source(), moves_[index].destination());
}
// Verify the move list before performing moves.
void Verify();
// Registers used to solve cycles.
const Register& SavedValueRegister() {
DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
masm_.ScratchRegister().code()));
return masm_.ScratchRegister();
}
// The scratch register is used to break cycles and to store constants.
// These two methods switch from one mode to the other.
void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); }
void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); }
const VRegister& SavedFPValueRegister() {
// We use the Crankshaft floating-point scratch register to break a cycle
// involving double values as the MacroAssembler will not need it for the
// operations performed by the gap resolver.
DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
crankshaft_fp_scratch.code()));
return crankshaft_fp_scratch;
}
LCodeGen* cgen_;
DelayedGapMasm masm_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_


@ -1,45 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
: name_(name), info_(info), zone_(info->isolate()->allocator(), ZONE_NAME) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
timer_.Start();
}
}
CompilationPhase::~CompilationPhase() {
if (FLAG_hydrogen_stats) {
size_t size = zone()->allocation_size();
size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
}
}
bool CompilationPhase::ShouldProduceTraceOutput() const {
// Trace if the appropriate trace flag is set and the phase name's first
// character is in the FLAG_trace_phase command line parameter.
AllowHandleDereference allow_deref;
bool tracing_on =
info()->IsStub()
? FLAG_trace_hydrogen_stubs
: (FLAG_trace_hydrogen &&
info()->shared_info()->PassesFilter(FLAG_trace_hydrogen_filter));
return (tracing_on &&
base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) !=
NULL);
}
} // namespace internal
} // namespace v8


@ -1,42 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_COMPILATION_PHASE_H_
#define V8_CRANKSHAFT_COMPILATION_PHASE_H_
#include "src/allocation.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
class CompilationPhase BASE_EMBEDDED {
public:
CompilationPhase(const char* name, CompilationInfo* info);
~CompilationPhase();
protected:
bool ShouldProduceTraceOutput() const;
const char* name() const { return name_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
Zone* zone() { return &zone_; }
private:
const char* name_;
CompilationInfo* info_;
Zone zone_;
size_t info_zone_start_allocation_size_;
base::ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_COMPILATION_PHASE_H_
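
A hypothetical usage sketch (HExamplePhase and the phase label are made-up names): a concrete phase wraps its work in an object derived from CompilationPhase, so the constructor/destructor pair shown in compilation-phase.cc attributes zone allocation and elapsed time to the phase whenever --hydrogen-stats is enabled.

// Sketch only: illustrates the RAII bracketing, not an actual V8 phase.
class HExamplePhase : public CompilationPhase {
 public:
  explicit HExamplePhase(CompilationInfo* info)
      : CompilationPhase("H_Example phase", info) {}

  void Run() {
    // Temporary data allocated in zone() lives exactly as long as the phase
    // and is charged to it under --hydrogen-stats; ~CompilationPhase() then
    // records the elapsed time via the isolate's HStatistics.
    // ... actual graph work would go here ...
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(HExamplePhase);
};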


@ -1,73 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
#define V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
enum HAliasing {
kMustAlias,
kMayAlias,
kNoAlias
};
// Defines the interface to alias analysis for the rest of the compiler.
// A simple implementation can use only local reasoning, but a more powerful
// analysis might employ points-to analysis.
class HAliasAnalyzer : public ZoneObject {
public:
// Simple alias analysis distinguishes allocations, parameters,
// and constants using only local reasoning.
HAliasing Query(HValue* a, HValue* b) {
// The same SSA value always references the same object.
if (a == b) return kMustAlias;
if (a->IsAllocate() || a->IsInnerAllocatedObject()) {
// Two non-identical allocations can never be aliases.
if (b->IsAllocate()) return kNoAlias;
if (b->IsInnerAllocatedObject()) return kNoAlias;
// An allocation can never alias a parameter or a constant.
if (b->IsParameter()) return kNoAlias;
if (b->IsConstant()) return kNoAlias;
}
if (b->IsAllocate() || b->IsInnerAllocatedObject()) {
// An allocation can never alias a parameter or a constant.
if (a->IsParameter()) return kNoAlias;
if (a->IsConstant()) return kNoAlias;
}
// Constant objects can be distinguished statically.
if (a->IsConstant() && b->IsConstant()) {
return a->Equals(b) ? kMustAlias : kNoAlias;
}
return kMayAlias;
}
// Checks whether the objects referred to by the given instructions may
// ever be aliases. Note that this is more conservative than checking
// {Query(a, b) == kMayAlias}, since this method considers kMustAlias
// objects to also be may-aliasing.
inline bool MayAlias(HValue* a, HValue* b) {
return Query(a, b) != kNoAlias;
}
inline bool MustAlias(HValue* a, HValue* b) {
return Query(a, b) == kMustAlias;
}
inline bool NoAlias(HValue* a, HValue* b) {
return Query(a, b) == kNoAlias;
}
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
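
The decision order in Query() can be exercised in isolation. The toy program below (plain C++, not V8 code) mirrors it over a reduced set of value kinds; anything that is not an allocation, parameter or constant falls back to kMayAlias, just as unclassified HValues do above.

#include <cstdio>

enum class Kind { kAllocation, kParameter, kConstant, kOther };
enum class Aliasing { kMustAlias, kMayAlias, kNoAlias };

struct Value {
  int id;        // stands in for SSA identity
  Kind kind;
  int constant;  // only meaningful when kind == kConstant
};

Aliasing Query(const Value& a, const Value& b) {
  // The same SSA value always references the same object.
  if (a.id == b.id) return Aliasing::kMustAlias;
  // A fresh allocation never aliases another allocation, a parameter or a
  // constant.
  if (a.kind == Kind::kAllocation &&
      (b.kind == Kind::kAllocation || b.kind == Kind::kParameter ||
       b.kind == Kind::kConstant)) {
    return Aliasing::kNoAlias;
  }
  if (b.kind == Kind::kAllocation &&
      (a.kind == Kind::kParameter || a.kind == Kind::kConstant)) {
    return Aliasing::kNoAlias;
  }
  // Constants can be distinguished statically.
  if (a.kind == Kind::kConstant && b.kind == Kind::kConstant) {
    return a.constant == b.constant ? Aliasing::kMustAlias : Aliasing::kNoAlias;
  }
  return Aliasing::kMayAlias;  // e.g. two distinct parameters
}

int main() {
  Value alloc{0, Kind::kAllocation, 0};
  Value param_a{1, Kind::kParameter, 0};
  Value param_b{2, Kind::kParameter, 0};
  std::printf("alloc vs param : %d\n", static_cast<int>(Query(alloc, param_a)));   // 2 = kNoAlias
  std::printf("param vs param : %d\n", static_cast<int>(Query(param_a, param_b))); // 1 = kMayAlias
  std::printf("param vs itself: %d\n", static_cast<int>(Query(param_a, param_a))); // 0 = kMustAlias
  return 0;
}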


@ -1,479 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-bce.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
// We try to "factor up" HBoundsCheck instructions towards the root of the
// dominator tree.
// For now we handle checks where the index is like "exp + int32value".
// If in the dominator tree we check "exp + v1" and later (dominated)
// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
// v2 > v1 we can use v2 in the 1st check and again remove the second.
// To do so we keep a dictionary of all checks where the key is the pair
// "exp, length".
// The class BoundsCheckKey represents this key.
class BoundsCheckKey : public ZoneObject {
public:
HValue* IndexBase() const { return index_base_; }
HValue* Length() const { return length_; }
uint32_t Hash() {
return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
}
static BoundsCheckKey* Create(Zone* zone,
HBoundsCheck* check,
int32_t* offset) {
if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
HValue* index_base = NULL;
HConstant* constant = NULL;
bool is_sub = false;
if (check->index()->IsAdd()) {
HAdd* index = HAdd::cast(check->index());
if (index->left()->IsConstant()) {
constant = HConstant::cast(index->left());
index_base = index->right();
} else if (index->right()->IsConstant()) {
constant = HConstant::cast(index->right());
index_base = index->left();
}
} else if (check->index()->IsSub()) {
HSub* index = HSub::cast(check->index());
is_sub = true;
if (index->right()->IsConstant()) {
constant = HConstant::cast(index->right());
index_base = index->left();
}
} else if (check->index()->IsConstant()) {
index_base = check->block()->graph()->GetConstant0();
constant = HConstant::cast(check->index());
}
if (constant != NULL && constant->HasInteger32Value() &&
constant->Integer32Value() != kMinInt) {
*offset = is_sub ? - constant->Integer32Value()
: constant->Integer32Value();
} else {
*offset = 0;
index_base = check->index();
}
return new(zone) BoundsCheckKey(index_base, check->length());
}
private:
BoundsCheckKey(HValue* index_base, HValue* length)
: index_base_(index_base),
length_(length) { }
HValue* index_base_;
HValue* length_;
DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey);
};
// Data about each HBoundsCheck that can be eliminated or moved.
// It is the "value" in the dictionary indexed by "base-index, length"
// (the key is BoundsCheckKey).
// We scan the code with a dominator tree traversal.
// Traversing the dominator tree we keep a stack (implemented as a singly
// linked list) of "data" for each basic block that contains a relevant check
// with the same key (the dictionary holds the head of the list).
// We also keep all the "data" created for a given basic block in a list, and
// use it to "clean up" the dictionary when backtracking in the dominator tree
// traversal.
// Doing this, each dictionary entry always directly points to the check that
// is dominating the code being examined now.
// We also track the current "offset" of the index expression and use it to
// decide if any check is already "covered" (so it can be removed) or not.
class BoundsCheckBbData: public ZoneObject {
public:
BoundsCheckKey* Key() const { return key_; }
int32_t LowerOffset() const { return lower_offset_; }
int32_t UpperOffset() const { return upper_offset_; }
HBasicBlock* BasicBlock() const { return basic_block_; }
HBoundsCheck* LowerCheck() const { return lower_check_; }
HBoundsCheck* UpperCheck() const { return upper_check_; }
BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
bool OffsetIsCovered(int32_t offset) const {
return offset >= LowerOffset() && offset <= UpperOffset();
}
bool HasSingleCheck() { return lower_check_ == upper_check_; }
void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
BoundsCheckBbData* data = FatherInDominatorTree();
while (data != NULL && data->UpperCheck() == check) {
DCHECK(data->upper_offset_ < offset);
data->upper_offset_ = offset;
data = data->FatherInDominatorTree();
}
}
void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
BoundsCheckBbData* data = FatherInDominatorTree();
while (data != NULL && data->LowerCheck() == check) {
DCHECK(data->lower_offset_ > offset);
data->lower_offset_ = offset;
data = data->FatherInDominatorTree();
}
}
// The goal of this method is to modify either upper_offset_ or
// lower_offset_ so that new_offset is also covered (the covered
// range grows).
//
// The precondition is that new_check follows UpperCheck() and
// LowerCheck() in the same basic block, and that new_offset is not
// covered (otherwise we could simply remove new_check).
//
// If HasSingleCheck() is true then new_check is added as "second check"
// (either upper or lower; note that HasSingleCheck() becomes false).
// Otherwise one of the current checks is modified so that it also covers
// new_offset, and new_check is removed.
void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
DCHECK(new_check->index()->representation().IsSmiOrInteger32());
bool keep_new_check = false;
if (new_offset > upper_offset_) {
upper_offset_ = new_offset;
if (HasSingleCheck()) {
keep_new_check = true;
upper_check_ = new_check;
} else {
TightenCheck(upper_check_, new_check, new_offset);
UpdateUpperOffsets(upper_check_, upper_offset_);
}
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
if (HasSingleCheck()) {
keep_new_check = true;
lower_check_ = new_check;
} else {
TightenCheck(lower_check_, new_check, new_offset);
UpdateLowerOffsets(lower_check_, lower_offset_);
}
} else {
// Should never have called CoverCheck() in this case.
UNREACHABLE();
}
if (!keep_new_check) {
if (FLAG_trace_bce) {
base::OS::Print("Eliminating check #%d after tightening\n",
new_check->id());
}
new_check->block()->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
new_check->DeleteAndReplaceWith(new_check->ActualValue());
} else {
HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
: lower_check_;
if (FLAG_trace_bce) {
base::OS::Print("Moving second check #%d after first check #%d\n",
new_check->id(), first_check->id());
}
// The length is guaranteed to be live at first_check.
DCHECK(new_check->length() == first_check->length());
HInstruction* old_position = new_check->next();
new_check->Unlink();
new_check->InsertAfter(first_check);
MoveIndexIfNecessary(new_check->index(), new_check, old_position);
}
}
BoundsCheckBbData(BoundsCheckKey* key,
int32_t lower_offset,
int32_t upper_offset,
HBasicBlock* bb,
HBoundsCheck* lower_check,
HBoundsCheck* upper_check,
BoundsCheckBbData* next_in_bb,
BoundsCheckBbData* father_in_dt)
: key_(key),
lower_offset_(lower_offset),
upper_offset_(upper_offset),
basic_block_(bb),
lower_check_(lower_check),
upper_check_(upper_check),
next_in_bb_(next_in_bb),
father_in_dt_(father_in_dt) { }
private:
BoundsCheckKey* key_;
int32_t lower_offset_;
int32_t upper_offset_;
HBasicBlock* basic_block_;
HBoundsCheck* lower_check_;
HBoundsCheck* upper_check_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
void MoveIndexIfNecessary(HValue* index_raw,
HBoundsCheck* insert_before,
HInstruction* end_of_scan_range) {
// index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
// HConstant(offset) or index_base directly.
// In the latter case, no need to move anything.
if (index_raw->IsAdd() || index_raw->IsSub()) {
HArithmeticBinaryOperation* index =
HArithmeticBinaryOperation::cast(index_raw);
HValue* left_input = index->left();
HValue* right_input = index->right();
HValue* context = index->context();
bool must_move_index = false;
bool must_move_left_input = false;
bool must_move_right_input = false;
bool must_move_context = false;
for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
if (cursor == left_input) must_move_left_input = true;
if (cursor == right_input) must_move_right_input = true;
if (cursor == context) must_move_context = true;
if (cursor == index) must_move_index = true;
if (cursor->previous() == NULL) {
cursor = cursor->block()->dominator()->end();
} else {
cursor = cursor->previous();
}
}
if (must_move_index) {
index->Unlink();
index->InsertBefore(insert_before);
}
// The BCE algorithm only selects mergeable bounds checks that share
// the same "index_base", so we'll only ever have to move constants.
if (must_move_left_input) {
HConstant::cast(left_input)->Unlink();
HConstant::cast(left_input)->InsertBefore(index);
}
if (must_move_right_input) {
HConstant::cast(right_input)->Unlink();
HConstant::cast(right_input)->InsertBefore(index);
}
if (must_move_context) {
// Contexts are always constants.
HConstant::cast(context)->Unlink();
HConstant::cast(context)->InsertBefore(index);
}
} else if (index_raw->IsConstant()) {
HConstant* index = HConstant::cast(index_raw);
bool must_move = false;
for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
if (cursor == index) must_move = true;
if (cursor->previous() == NULL) {
cursor = cursor->block()->dominator()->end();
} else {
cursor = cursor->previous();
}
}
if (must_move) {
index->Unlink();
index->InsertBefore(insert_before);
}
}
}
void TightenCheck(HBoundsCheck* original_check,
HBoundsCheck* tighter_check,
int32_t new_offset) {
DCHECK(original_check->length() == tighter_check->length());
MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
original_check->ReplaceAllUsesWith(original_check->index());
original_check->SetOperandAt(0, tighter_check->index());
if (FLAG_trace_bce) {
base::OS::Print("Tightened check #%d with offset %d from #%d\n",
original_check->id(), new_offset, tighter_check->id());
}
}
DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
};
static bool BoundsCheckKeyMatch(void* key1, void* key2) {
BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
}
BoundsCheckTable::BoundsCheckTable(Zone* zone)
: CustomMatcherZoneHashMap(BoundsCheckKeyMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)) {}
BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
Zone* zone) {
return reinterpret_cast<BoundsCheckBbData**>(
&(CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
ZoneAllocationPolicy(zone))
->value));
}
void BoundsCheckTable::Insert(BoundsCheckKey* key,
BoundsCheckBbData* data,
Zone* zone) {
CustomMatcherZoneHashMap::LookupOrInsert(key, key->Hash(),
ZoneAllocationPolicy(zone))
->value = data;
}
void BoundsCheckTable::Delete(BoundsCheckKey* key) {
Remove(key, key->Hash());
}
class HBoundsCheckEliminationState {
public:
HBasicBlock* block_;
BoundsCheckBbData* bb_data_list_;
int index_;
};
// Eliminates checks in bb and recursively in the dominated blocks.
// Also replaces the results of check instructions with the original value, if
// the result is used. This is safe now, since we don't do code motion after
// this point. It enables better register allocation since the value produced
// by check instructions is really a copy of the original value.
void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
HBasicBlock* entry) {
// Allocate the stack.
HBoundsCheckEliminationState* stack =
zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
// Explicitly push the entry block.
stack[0].block_ = entry;
stack[0].bb_data_list_ = PreProcessBlock(entry);
stack[0].index_ = 0;
int stack_depth = 1;
// Implement depth-first traversal with a stack.
while (stack_depth > 0) {
int current = stack_depth - 1;
HBoundsCheckEliminationState* state = &stack[current];
const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
if (state->index_ < children->length()) {
// Recursively visit children blocks.
HBasicBlock* child = children->at(state->index_++);
int next = stack_depth++;
stack[next].block_ = child;
stack[next].bb_data_list_ = PreProcessBlock(child);
stack[next].index_ = 0;
} else {
// Finished with all children; post process the block.
PostProcessBlock(state->block_, state->bb_data_list_);
stack_depth--;
}
}
}
BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
HBasicBlock* bb) {
BoundsCheckBbData* bb_data_list = NULL;
for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
HInstruction* i = it.Current();
if (!i->IsBoundsCheck()) continue;
HBoundsCheck* check = HBoundsCheck::cast(i);
int32_t offset = 0;
BoundsCheckKey* key =
BoundsCheckKey::Create(zone(), check, &offset);
if (key == NULL) continue;
BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone());
BoundsCheckBbData* data = *data_p;
if (data == NULL) {
bb_data_list = new(zone()) BoundsCheckBbData(key,
offset,
offset,
bb,
check,
check,
bb_data_list,
NULL);
*data_p = bb_data_list;
if (FLAG_trace_bce) {
base::OS::Print("Fresh bounds check data for block #%d: [%d]\n",
bb->block_id(), offset);
}
} else if (data->OffsetIsCovered(offset)) {
bb->graph()->isolate()->counters()->
bounds_checks_eliminated()->Increment();
if (FLAG_trace_bce) {
base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
check->id(), offset);
}
check->DeleteAndReplaceWith(check->ActualValue());
} else if (data->BasicBlock() == bb) {
// TODO(jkummerow): I think the following logic would be preferable:
// if (data->Basicblock() == bb ||
// graph()->use_optimistic_licm() ||
// bb->IsLoopSuccessorDominator()) {
// data->CoverCheck(check, offset)
// } else {
// /* add pristine BCBbData like in (data == NULL) case above */
// }
// Even better would be: distinguish between read-only dominator-imposed
// knowledge and modifiable upper/lower checks.
// What happens currently is that the first bounds check in a dominated
// block will stay around while any further checks are hoisted out,
// which doesn't make sense. Investigate/fix this in a future CL.
data->CoverCheck(check, offset);
} else if (graph()->use_optimistic_licm() ||
bb->IsLoopSuccessorDominator()) {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
: data->LowerOffset();
int32_t new_upper_offset = offset > data->UpperOffset()
? offset
: data->UpperOffset();
bb_data_list = new(zone()) BoundsCheckBbData(key,
new_lower_offset,
new_upper_offset,
bb,
data->LowerCheck(),
data->UpperCheck(),
bb_data_list,
data);
if (FLAG_trace_bce) {
base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
bb->block_id(), new_lower_offset, new_upper_offset);
}
table_.Insert(key, bb_data_list, zone());
}
}
return bb_data_list;
}
void HBoundsCheckEliminationPhase::PostProcessBlock(
HBasicBlock* block, BoundsCheckBbData* data) {
while (data != NULL) {
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
table_.Delete(data->Key());
}
data = data->NextInBasicBlock();
}
}
} // namespace internal
} // namespace v8
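
A simplified model of the per-block covering logic above (plain C++, not V8 code): for one "index base" and length, the first check pins the covered interval, later offsets inside the interval are dropped, and at most two checks, one per bound, survive in a block. Check motion, constant handling and the dominator-tree propagation are deliberately left out.

#include <cstdio>

// Per-block state for one (index base, length) key, mirroring the
// [LowerOffset(), UpperOffset()] interval of BoundsCheckBbData.
struct BlockData {
  bool seen = false;
  int lower = 0, upper = 0;
  int surviving_checks = 0;
};

// Returns true if the check with the given offset has to stay in the code.
bool ProcessCheck(BlockData* data, int offset) {
  if (!data->seen) {
    data->seen = true;
    data->lower = data->upper = offset;
    data->surviving_checks = 1;
    return true;                          // the first check always stays
  }
  if (offset >= data->lower && offset <= data->upper) return false;  // covered
  if (offset > data->upper) data->upper = offset; else data->lower = offset;
  if (data->surviving_checks == 1) {
    data->surviving_checks = 2;           // keep it as the second bound check
    return true;
  }
  return false;                           // an existing bound was widened instead
}

int main() {
  // Checks for a[i + 1], a[i + 3], a[i + 2], a[i - 1] against the same length.
  BlockData data;
  const int offsets[] = {1, 3, 2, -1};
  for (int offset : offsets) {
    std::printf("offset %+d -> %s\n", offset,
                ProcessCheck(&data, offset) ? "check kept" : "check eliminated");
  }
  std::printf("covered range: [%d, %d], checks left: %d\n",
              data.lower, data.upper, data.surviving_checks);
  return 0;
}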


@ -1,52 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_BCE_H_
#define V8_CRANKSHAFT_HYDROGEN_BCE_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class BoundsCheckBbData;
class BoundsCheckKey;
class BoundsCheckTable : private CustomMatcherZoneHashMap {
public:
explicit BoundsCheckTable(Zone* zone);
INLINE(BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone));
INLINE(void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone));
INLINE(void Delete(BoundsCheckKey* key));
private:
DISALLOW_COPY_AND_ASSIGN(BoundsCheckTable);
};
class HBoundsCheckEliminationPhase : public HPhase {
public:
explicit HBoundsCheckEliminationPhase(HGraph* graph)
: HPhase("H_Bounds checks elimination", graph), table_(zone()) { }
void Run() {
EliminateRedundantBoundsChecks(graph()->entry_block());
}
private:
void EliminateRedundantBoundsChecks(HBasicBlock* bb);
BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
BoundsCheckTable table_;
DISALLOW_COPY_AND_ASSIGN(HBoundsCheckEliminationPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_BCE_H_


@ -1,59 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-canonicalize.h"
#include "src/counters.h"
#include "src/crankshaft/hydrogen-redundant-phi.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HCanonicalizePhase::Run() {
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
// Before removing no-op instructions, save their semantic value.
// We must be careful not to set the flag unnecessarily, because GVN
// cannot identify two instructions when their flag value differs.
for (int i = 0; i < blocks->length(); ++i) {
for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (instr->IsArithmeticBinaryOperation()) {
if (instr->representation().IsInteger32()) {
if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToInt32)) {
instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
}
} else if (instr->representation().IsSmi()) {
if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToSmi)) {
instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
} else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
HInstruction::kTruncatingToInt32)) {
// Avoid redundant minus zero check
instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
}
}
}
}
}
// Perform the actual canonicalization pass.
HRedundantPhiEliminationPhase redundant_phi_eliminator(graph());
for (int i = 0; i < blocks->length(); ++i) {
// Eliminate redundant phis in the block first; changes to their inputs
// might have made them redundant, and eliminating them creates more
// opportunities for constant folding and strength reduction.
redundant_phi_eliminator.ProcessBlock(blocks->at(i));
// Now canonicalize each instruction.
for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
HValue* value = instr->Canonicalize();
if (value != instr) instr->DeleteAndReplaceWith(value);
}
}
}
} // namespace internal
} // namespace v8


@ -1,29 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
#define V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HCanonicalizePhase : public HPhase {
public:
explicit HCanonicalizePhase(HGraph* graph)
: HPhase("H_Canonicalize", graph) { }
void Run();
private:
DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_


@ -1,913 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-check-elimination.h"
#include "src/crankshaft/hydrogen-alias-analysis.h"
#include "src/crankshaft/hydrogen-flow-engine.h"
#include "src/objects-inl.h"
#define GLOBAL 1
// Only collect stats in debug mode.
#if DEBUG
#define INC_STAT(x) phase_->x++
#else
#define INC_STAT(x)
#endif
// For code de-uglification.
#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
namespace v8 {
namespace internal {
typedef const UniqueSet<Map>* MapSet;
struct HCheckTableEntry {
enum State {
// We have seen a map check (i.e. an HCheckMaps) for these maps, so we can
// use this information to eliminate further map checks, elements kind
// transitions, etc.
CHECKED,
// Same as CHECKED, but we also know that these maps are stable.
CHECKED_STABLE,
// These maps are stable, but not checked (i.e. we learned this via field
// type tracking or from a constant, or they were initially CHECKED_STABLE,
// but became UNCHECKED_STABLE because of an instruction that changes maps
// or elements kind), and we need a stability check for them in order to use
// this information for check elimination (which turns them back to
// CHECKED_STABLE).
UNCHECKED_STABLE
};
static const char* State2String(State state) {
switch (state) {
case CHECKED: return "checked";
case CHECKED_STABLE: return "checked stable";
case UNCHECKED_STABLE: return "unchecked stable";
}
UNREACHABLE();
}
static State StateMerge(State state1, State state2) {
if (state1 == state2) return state1;
if ((state1 == CHECKED && state2 == CHECKED_STABLE) ||
(state2 == CHECKED && state1 == CHECKED_STABLE)) {
return CHECKED;
}
DCHECK((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) ||
(state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE));
return UNCHECKED_STABLE;
}
HValue* object_; // The object being approximated. NULL => invalid entry.
HInstruction* check_; // The last check instruction.
MapSet maps_; // The set of known maps for the object.
State state_; // The state of this entry.
};
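// Example of how two predecessors' knowledge about the same object is merged
// (see Merge() below): the map sets are unioned and the state is weakened,
//   pred 1: o is CHECKED        {MapA}
//   pred 2: o is CHECKED_STABLE {MapA, MapB}
//   merge : o is CHECKED        {MapA, MapB}
// while CHECKED_STABLE merged with UNCHECKED_STABLE stays UNCHECKED_STABLE,
// and CHECKED merged with UNCHECKED_STABLE drops the entry entirely.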
// The main data structure used during check elimination, which stores a
// set of known maps for each object.
class HCheckTable : public ZoneObject {
public:
static const int kMaxTrackedObjects = 16;
explicit HCheckTable(HCheckEliminationPhase* phase)
: phase_(phase),
cursor_(0),
size_(0) {
}
// The main processing of instructions.
HCheckTable* Process(HInstruction* instr, Zone* zone) {
switch (instr->opcode()) {
case HValue::kCheckMaps: {
ReduceCheckMaps(HCheckMaps::cast(instr));
break;
}
case HValue::kLoadNamedField: {
ReduceLoadNamedField(HLoadNamedField::cast(instr));
break;
}
case HValue::kStoreNamedField: {
ReduceStoreNamedField(HStoreNamedField::cast(instr));
break;
}
case HValue::kCompareMap: {
ReduceCompareMap(HCompareMap::cast(instr));
break;
}
case HValue::kCompareObjectEqAndBranch: {
ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr));
break;
}
case HValue::kIsStringAndBranch: {
ReduceIsStringAndBranch(HIsStringAndBranch::cast(instr));
break;
}
case HValue::kTransitionElementsKind: {
ReduceTransitionElementsKind(
HTransitionElementsKind::cast(instr));
break;
}
case HValue::kCheckHeapObject: {
ReduceCheckHeapObject(HCheckHeapObject::cast(instr));
break;
}
case HValue::kCheckInstanceType: {
ReduceCheckInstanceType(HCheckInstanceType::cast(instr));
break;
}
default: {
// If the instruction changes maps uncontrollably, drop everything.
if (instr->CheckChangesFlag(kOsrEntries)) {
Kill();
break;
}
if (instr->CheckChangesFlag(kElementsKind) ||
instr->CheckChangesFlag(kMaps)) {
KillUnstableEntries();
}
}
// Improvements possible:
// - eliminate redundant HCheckSmi instructions
// - track which values have been HCheckHeapObject'd
}
return this;
}
// Support for global analysis with HFlowEngine: Merge given state with
// the other incoming state.
static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
HCheckTable* pred_state, HBasicBlock* pred_block,
Zone* zone) {
if (pred_state == NULL || pred_block->IsUnreachable()) {
return succ_state;
}
if (succ_state == NULL) {
return pred_state->Copy(succ_block, pred_block, zone);
} else {
return succ_state->Merge(succ_block, pred_state, pred_block, zone);
}
}
// Support for global analysis with HFlowEngine: Given state merged with all
// the other incoming states, prepare it for use.
static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
Zone* zone) {
if (state == NULL) {
block->MarkUnreachable();
} else if (block->IsUnreachable()) {
state = NULL;
}
if (FLAG_trace_check_elimination) {
PrintF("Processing B%d, checkmaps-table:\n", block->block_id());
Print(state);
}
return state;
}
private:
// Copy state to successor block.
HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
HCheckTable* copy = new(zone) HCheckTable(phase_);
for (int i = 0; i < size_; i++) {
HCheckTableEntry* old_entry = &entries_[i];
DCHECK(old_entry->maps_->size() > 0);
HCheckTableEntry* new_entry = &copy->entries_[i];
new_entry->object_ = old_entry->object_;
new_entry->maps_ = old_entry->maps_;
new_entry->state_ = old_entry->state_;
// Keep the check if the existing check's block dominates the successor.
if (old_entry->check_ != NULL &&
old_entry->check_->block()->Dominates(succ)) {
new_entry->check_ = old_entry->check_;
} else {
// Leave it NULL till we meet a new check instruction for this object
// in the control flow.
new_entry->check_ = NULL;
}
}
copy->cursor_ = cursor_;
copy->size_ = size_;
// Create entries for succ block's phis.
if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
int pred_index = succ->PredecessorIndexOf(from_block);
for (int phi_index = 0;
phi_index < succ->phis()->length();
++phi_index) {
HPhi* phi = succ->phis()->at(phi_index);
HValue* phi_operand = phi->OperandAt(pred_index);
HCheckTableEntry* pred_entry = copy->Find(phi_operand);
if (pred_entry != NULL) {
// Create an entry for a phi in the table.
copy->Insert(phi, NULL, pred_entry->maps_, pred_entry->state_);
}
}
}
// Branch-sensitive analysis for certain comparisons may add more facts
// to the state for the successor on the true branch.
bool learned = false;
if (succ->predecessors()->length() == 1) {
HControlInstruction* end = succ->predecessors()->at(0)->end();
bool is_true_branch = end->SuccessorAt(0) == succ;
if (end->IsCompareMap()) {
HCompareMap* cmp = HCompareMap::cast(end);
HValue* object = cmp->value()->ActualValue();
HCheckTableEntry* entry = copy->Find(object);
if (is_true_branch) {
HCheckTableEntry::State state = cmp->map_is_stable()
? HCheckTableEntry::CHECKED_STABLE
: HCheckTableEntry::CHECKED;
// Learn on the true branch of if(CompareMap(x)).
if (entry == NULL) {
copy->Insert(object, cmp, cmp->map(), state);
} else {
entry->maps_ = new(zone) UniqueSet<Map>(cmp->map(), zone);
entry->check_ = cmp;
entry->state_ = state;
}
} else {
// Learn on the false branch of if(CompareMap(x)).
if (entry != NULL) {
EnsureChecked(entry, object, cmp);
UniqueSet<Map>* maps = entry->maps_->Copy(zone);
maps->Remove(cmp->map());
entry->maps_ = maps;
DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
}
}
learned = true;
} else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
// Learn on the true branch of if(CmpObjectEq(x, y)).
HCompareObjectEqAndBranch* cmp =
HCompareObjectEqAndBranch::cast(end);
HValue* left = cmp->left()->ActualValue();
HValue* right = cmp->right()->ActualValue();
HCheckTableEntry* le = copy->Find(left);
HCheckTableEntry* re = copy->Find(right);
if (le == NULL) {
if (re != NULL) {
copy->Insert(left, NULL, re->maps_, re->state_);
}
} else if (re == NULL) {
copy->Insert(right, NULL, le->maps_, le->state_);
} else {
EnsureChecked(le, cmp->left(), cmp);
EnsureChecked(re, cmp->right(), cmp);
le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone);
le->state_ = re->state_ = HCheckTableEntry::StateMerge(
le->state_, re->state_);
DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_);
DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_);
}
learned = true;
} else if (end->IsIsStringAndBranch()) {
HIsStringAndBranch* cmp = HIsStringAndBranch::cast(end);
HValue* object = cmp->value()->ActualValue();
HCheckTableEntry* entry = copy->Find(object);
if (is_true_branch) {
// Learn on the true branch of if(IsString(x)).
if (entry == NULL) {
copy->Insert(object, NULL, string_maps(),
HCheckTableEntry::CHECKED);
} else {
EnsureChecked(entry, object, cmp);
entry->maps_ = entry->maps_->Intersect(string_maps(), zone);
DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
}
} else {
// Learn on the false branch of if(IsString(x)).
if (entry != NULL) {
EnsureChecked(entry, object, cmp);
entry->maps_ = entry->maps_->Subtract(string_maps(), zone);
DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
}
}
}
// Learning on false branches requires storing negative facts.
}
if (FLAG_trace_check_elimination) {
PrintF("B%d checkmaps-table %s from B%d:\n",
succ->block_id(),
learned ? "learned" : "copied",
from_block->block_id());
Print(copy);
}
return copy;
}
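// Example of the branch-sensitive learning in Copy() above: after a
// dominating if (CompareMap(x, MapA)), the copy made for the true successor
// records x -> {MapA} (CHECKED, or CHECKED_STABLE when the map is stable),
// while the copy for the false successor removes MapA from whatever set was
// already known for x; the sets of two values compared by
// CompareObjectEqAndBranch are intersected on the true branch.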
// Merge this state with the other incoming state.
HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
HBasicBlock* pred_block, Zone* zone) {
if (that->size_ == 0) {
// If the other state is empty, simply reset.
size_ = 0;
cursor_ = 0;
} else {
int pred_index = succ->PredecessorIndexOf(pred_block);
bool compact = false;
for (int i = 0; i < size_; i++) {
HCheckTableEntry* this_entry = &entries_[i];
HCheckTableEntry* that_entry;
if (this_entry->object_->IsPhi() &&
this_entry->object_->block() == succ) {
HPhi* phi = HPhi::cast(this_entry->object_);
HValue* phi_operand = phi->OperandAt(pred_index);
that_entry = that->Find(phi_operand);
} else {
that_entry = that->Find(this_entry->object_);
}
if (that_entry == NULL ||
(that_entry->state_ == HCheckTableEntry::CHECKED &&
this_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) ||
(this_entry->state_ == HCheckTableEntry::CHECKED &&
that_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE)) {
this_entry->object_ = NULL;
compact = true;
} else {
this_entry->maps_ =
this_entry->maps_->Union(that_entry->maps_, zone);
this_entry->state_ = HCheckTableEntry::StateMerge(
this_entry->state_, that_entry->state_);
if (this_entry->check_ != that_entry->check_) {
this_entry->check_ = NULL;
}
DCHECK(this_entry->maps_->size() > 0);
}
}
if (compact) Compact();
}
if (FLAG_trace_check_elimination) {
PrintF("B%d checkmaps-table merged with B%d table:\n",
succ->block_id(), pred_block->block_id());
Print(this);
}
return this;
}
void ReduceCheckMaps(HCheckMaps* instr) {
HValue* object = instr->value()->ActualValue();
HCheckTableEntry* entry = Find(object);
if (entry != NULL) {
// Entry found.
HGraph* graph = instr->block()->graph();
if (entry->maps_->IsSubset(instr->maps())) {
// The first check is more strict; the second is redundant.
if (entry->check_ != NULL) {
DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
instr->id(), instr->block()->block_id(), entry->check_->id()));
instr->DeleteAndReplaceWith(entry->check_);
INC_STAT(redundant_);
} else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
DCHECK_NULL(entry->check_);
TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n",
instr->id(), instr->block()->block_id()));
instr->set_maps(entry->maps_->Copy(graph->zone()));
instr->MarkAsStabilityCheck();
entry->state_ = HCheckTableEntry::CHECKED_STABLE;
} else if (!instr->IsStabilityCheck()) {
TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n",
instr->id(), instr->block()->block_id()));
// Mark check as dead but leave it in the graph as a checkpoint for
// subsequent checks.
instr->SetFlag(HValue::kIsDead);
entry->check_ = instr;
INC_STAT(removed_);
}
return;
}
MapSet intersection = instr->maps()->Intersect(
entry->maps_, graph->zone());
if (intersection->size() == 0) {
// Intersection is empty; probably megamorphic.
INC_STAT(empty_);
entry->object_ = NULL;
Compact();
} else {
// Update set of maps in the entry.
entry->maps_ = intersection;
// Update state of the entry.
if (instr->maps_are_stable() ||
entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
entry->state_ = HCheckTableEntry::CHECKED_STABLE;
}
if (intersection->size() != instr->maps()->size()) {
// Narrow set of maps in the second check maps instruction.
if (entry->check_ != NULL &&
entry->check_->block() == instr->block() &&
entry->check_->IsCheckMaps()) {
// There is a check in the same block so replace it with a more
// strict check and eliminate the second check entirely.
HCheckMaps* check = HCheckMaps::cast(entry->check_);
DCHECK(!check->IsStabilityCheck());
TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
check->block()->block_id()));
// Update map set and ensure that the check is alive.
check->set_maps(intersection);
check->ClearFlag(HValue::kIsDead);
TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
instr->id(), instr->block()->block_id(), entry->check_->id()));
instr->DeleteAndReplaceWith(entry->check_);
} else {
TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
instr->block()->block_id()));
instr->set_maps(intersection);
entry->check_ = instr->IsStabilityCheck() ? NULL : instr;
}
if (FLAG_trace_check_elimination) {
Print(this);
}
INC_STAT(narrowed_);
}
}
} else {
// No entry; insert a new one.
HCheckTableEntry::State state = instr->maps_are_stable()
? HCheckTableEntry::CHECKED_STABLE
: HCheckTableEntry::CHECKED;
HCheckMaps* check = instr->IsStabilityCheck() ? NULL : instr;
Insert(object, check, instr->maps(), state);
}
}
void ReduceCheckInstanceType(HCheckInstanceType* instr) {
HValue* value = instr->value()->ActualValue();
HCheckTableEntry* entry = Find(value);
if (entry == NULL) {
if (instr->check() == HCheckInstanceType::IS_STRING) {
Insert(value, NULL, string_maps(), HCheckTableEntry::CHECKED);
}
return;
}
UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(
entry->maps_->size(), zone());
for (int i = 0; i < entry->maps_->size(); ++i) {
InstanceType type;
Unique<Map> map = entry->maps_->at(i);
{
// This is safe, because maps don't move and their instance type does
// not change.
AllowHandleDereference allow_deref;
type = map.handle()->instance_type();
}
if (instr->is_interval_check()) {
InstanceType first_type, last_type;
instr->GetCheckInterval(&first_type, &last_type);
if (first_type <= type && type <= last_type) maps->Add(map, zone());
} else {
uint8_t mask, tag;
instr->GetCheckMaskAndTag(&mask, &tag);
if ((type & mask) == tag) maps->Add(map, zone());
}
}
if (maps->size() == entry->maps_->size()) {
TRACE(("Removing redundant CheckInstanceType #%d at B%d\n",
instr->id(), instr->block()->block_id()));
EnsureChecked(entry, value, instr);
instr->DeleteAndReplaceWith(value);
INC_STAT(removed_cit_);
} else if (maps->size() != 0) {
entry->maps_ = maps;
if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
entry->state_ = HCheckTableEntry::CHECKED_STABLE;
}
}
}
void ReduceLoadNamedField(HLoadNamedField* instr) {
// Reduce a load of the map field when it is known to be a constant.
if (!instr->access().IsMap()) {
// Check if we introduce field maps here.
MapSet maps = instr->maps();
if (maps != NULL) {
DCHECK_NE(0, maps->size());
Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE);
}
return;
}
HValue* object = instr->object()->ActualValue();
HCheckTableEntry* entry = Find(object);
if (entry == NULL || entry->maps_->size() != 1) return; // Not a constant.
EnsureChecked(entry, object, instr);
Unique<Map> map = entry->maps_->at(0);
bool map_is_stable = (entry->state_ != HCheckTableEntry::CHECKED);
HConstant* constant = HConstant::CreateAndInsertBefore(
instr->block()->graph()->zone(), map, map_is_stable, instr);
instr->DeleteAndReplaceWith(constant);
INC_STAT(loads_);
}
void ReduceCheckHeapObject(HCheckHeapObject* instr) {
HValue* value = instr->value()->ActualValue();
if (Find(value) != NULL) {
// If the object has known maps, it's definitely a heap object.
instr->DeleteAndReplaceWith(value);
INC_STAT(removed_cho_);
}
}
void ReduceStoreNamedField(HStoreNamedField* instr) {
HValue* object = instr->object()->ActualValue();
if (instr->has_transition()) {
// This store transitions the object to a new map.
Kill(object);
HConstant* c_transition = HConstant::cast(instr->transition());
HCheckTableEntry::State state = c_transition->HasStableMapValue()
? HCheckTableEntry::CHECKED_STABLE
: HCheckTableEntry::CHECKED;
Insert(object, NULL, c_transition->MapValue(), state);
} else if (instr->access().IsMap()) {
// This is a store directly to the map field of the object.
Kill(object);
if (!instr->value()->IsConstant()) return;
HConstant* c_value = HConstant::cast(instr->value());
HCheckTableEntry::State state = c_value->HasStableMapValue()
? HCheckTableEntry::CHECKED_STABLE
: HCheckTableEntry::CHECKED;
Insert(object, NULL, c_value->MapValue(), state);
} else {
// If the instruction changes maps, it should be handled above.
CHECK(!instr->CheckChangesFlag(kMaps));
}
}
void ReduceCompareMap(HCompareMap* instr) {
HCheckTableEntry* entry = Find(instr->value()->ActualValue());
if (entry == NULL) return;
EnsureChecked(entry, instr->value(), instr);
int succ;
if (entry->maps_->Contains(instr->map())) {
if (entry->maps_->size() != 1) {
TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
"ambiguous set of maps\n", instr->id(), instr->value()->id(),
instr->block()->block_id()));
return;
}
succ = 0;
INC_STAT(compares_true_);
} else {
succ = 1;
INC_STAT(compares_false_);
}
TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
instr->id(), instr->value()->id(), instr->block()->block_id(),
succ == 0 ? "true" : "false"));
instr->set_known_successor_index(succ);
int unreachable_succ = 1 - succ;
instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) {
HValue* left = instr->left()->ActualValue();
HCheckTableEntry* le = Find(left);
if (le == NULL) return;
HValue* right = instr->right()->ActualValue();
HCheckTableEntry* re = Find(right);
if (re == NULL) return;
EnsureChecked(le, left, instr);
EnsureChecked(re, right, instr);
// TODO(bmeurer): Add a predicate here instead of computing the intersection
MapSet intersection = le->maps_->Intersect(re->maps_, zone());
if (intersection->size() > 0) return;
TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n",
instr->id(), instr->block()->block_id()));
int succ = 1;
instr->set_known_successor_index(succ);
int unreachable_succ = 1 - succ;
instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceIsStringAndBranch(HIsStringAndBranch* instr) {
HValue* value = instr->value()->ActualValue();
HCheckTableEntry* entry = Find(value);
if (entry == NULL) return;
EnsureChecked(entry, value, instr);
int succ;
if (entry->maps_->IsSubset(string_maps())) {
TRACE(("Marking redundant IsStringAndBranch #%d at B%d as true\n",
instr->id(), instr->block()->block_id()));
succ = 0;
} else {
MapSet intersection = entry->maps_->Intersect(string_maps(), zone());
if (intersection->size() > 0) return;
TRACE(("Marking redundant IsStringAndBranch #%d at B%d as false\n",
instr->id(), instr->block()->block_id()));
succ = 1;
}
instr->set_known_successor_index(succ);
int unreachable_succ = 1 - succ;
instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
}
void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
HValue* object = instr->object()->ActualValue();
HCheckTableEntry* entry = Find(object);
// Can only learn more about an object that already has a known set of maps.
if (entry == NULL) {
Kill(object);
return;
}
EnsureChecked(entry, object, instr);
if (entry->maps_->Contains(instr->original_map())) {
// If the object has the original map, it will be transitioned.
UniqueSet<Map>* maps = entry->maps_->Copy(zone());
maps->Remove(instr->original_map());
maps->Add(instr->transitioned_map(), zone());
HCheckTableEntry::State state =
(entry->state_ == HCheckTableEntry::CHECKED_STABLE &&
instr->map_is_stable())
? HCheckTableEntry::CHECKED_STABLE
: HCheckTableEntry::CHECKED;
Kill(object);
Insert(object, NULL, maps, state);
} else {
// Object does not have the given map, thus the transition is redundant.
instr->DeleteAndReplaceWith(object);
INC_STAT(transitions_);
}
}
void EnsureChecked(HCheckTableEntry* entry,
HValue* value,
HInstruction* instr) {
if (entry->state_ != HCheckTableEntry::UNCHECKED_STABLE) return;
HGraph* graph = instr->block()->graph();
HCheckMaps* check = HCheckMaps::CreateAndInsertBefore(
graph->zone(), value, entry->maps_->Copy(graph->zone()), true, instr);
check->MarkAsStabilityCheck();
entry->state_ = HCheckTableEntry::CHECKED_STABLE;
entry->check_ = NULL;
}
// Kill everything in the table.
void Kill() {
size_ = 0;
cursor_ = 0;
}
// Kill all unstable entries in the table.
void KillUnstableEntries() {
bool compact = false;
for (int i = 0; i < size_; ++i) {
HCheckTableEntry* entry = &entries_[i];
DCHECK_NOT_NULL(entry->object_);
if (entry->state_ == HCheckTableEntry::CHECKED) {
entry->object_ = NULL;
compact = true;
} else {
// All checked stable entries become unchecked stable.
entry->state_ = HCheckTableEntry::UNCHECKED_STABLE;
entry->check_ = NULL;
}
}
if (compact) Compact();
}
// Kill everything in the table that may alias {object}.
void Kill(HValue* object) {
bool compact = false;
for (int i = 0; i < size_; i++) {
HCheckTableEntry* entry = &entries_[i];
DCHECK_NOT_NULL(entry->object_);
if (phase_->aliasing_->MayAlias(entry->object_, object)) {
entry->object_ = NULL;
compact = true;
}
}
if (compact) Compact();
DCHECK_NULL(Find(object));
}
void Compact() {
// First, compact the array in place.
int max = size_, dest = 0, old_cursor = cursor_;
for (int i = 0; i < max; i++) {
if (entries_[i].object_ != NULL) {
if (dest != i) entries_[dest] = entries_[i];
dest++;
} else {
if (i < old_cursor) cursor_--;
size_--;
}
}
DCHECK(size_ == dest);
DCHECK(cursor_ <= size_);
// Preserve the age of the entries by moving the older entries to the end.
if (cursor_ == size_) return; // Cursor already points at end.
if (cursor_ != 0) {
      // | L = oldest | R = newest |        |
      //              ^ cursor     ^ size   ^ MAX
HCheckTableEntry tmp_entries[kMaxTrackedObjects];
int L = cursor_;
int R = size_ - cursor_;
MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
}
cursor_ = size_; // Move cursor to end.
}
static void Print(HCheckTable* table) {
if (table == NULL) {
PrintF(" unreachable\n");
return;
}
for (int i = 0; i < table->size_; i++) {
HCheckTableEntry* entry = &table->entries_[i];
DCHECK(entry->object_ != NULL);
PrintF(" checkmaps-table @%d: %s #%d ", i,
entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
if (entry->check_ != NULL) {
PrintF("check #%d ", entry->check_->id());
}
MapSet list = entry->maps_;
PrintF("%d %s maps { ", list->size(),
HCheckTableEntry::State2String(entry->state_));
for (int j = 0; j < list->size(); j++) {
if (j > 0) PrintF(", ");
PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
}
PrintF(" }\n");
}
}
HCheckTableEntry* Find(HValue* object) {
for (int i = size_ - 1; i >= 0; i--) {
// Search from most-recently-inserted to least-recently-inserted.
HCheckTableEntry* entry = &entries_[i];
DCHECK(entry->object_ != NULL);
if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry;
}
return NULL;
}
void Insert(HValue* object,
HInstruction* check,
Unique<Map> map,
HCheckTableEntry::State state) {
Insert(object, check, new(zone()) UniqueSet<Map>(map, zone()), state);
}
void Insert(HValue* object,
HInstruction* check,
MapSet maps,
HCheckTableEntry::State state) {
DCHECK(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL);
HCheckTableEntry* entry = &entries_[cursor_++];
entry->object_ = object;
entry->check_ = check;
entry->maps_ = maps;
entry->state_ = state;
// If the table becomes full, wrap around and overwrite older entries.
if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
if (size_ < kMaxTrackedObjects) size_++;
}
Zone* zone() const { return phase_->zone(); }
MapSet string_maps() const { return phase_->string_maps(); }
friend class HCheckMapsEffects;
friend class HCheckEliminationPhase;
HCheckEliminationPhase* phase_;
HCheckTableEntry entries_[kMaxTrackedObjects];
int16_t cursor_; // Must be <= kMaxTrackedObjects
int16_t size_; // Must be <= kMaxTrackedObjects
STATIC_ASSERT(kMaxTrackedObjects < (1 << 15));
};
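As a side illustration of the bookkeeping in the class above: entries_ acts as a fixed-size buffer whose cursor_ wraps around and overwrites entries, and Compact() rotates the surviving entries so their relative age is preserved before resetting the cursor to the end. A minimal standalone sketch of that rotation step, using a std::vector of ints in place of HCheckTableEntry (the values and sizes are invented for the example):

// Illustrative sketch only: reproduces the "move the older block to the end"
// rotation from Compact() on plain integers instead of HCheckTableEntry.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Entries [0, cursor) form the L block, [cursor, size) the R block,
  // mirroring the "| L = oldest | R = newest |" diagram in Compact().
  std::vector<int> entries = {4, 5, 1, 2, 3};
  size_t cursor = 2;
  // Rotate the block in front of the cursor to the back, as Compact() does,
  // so the cursor can simply point at the end afterwards.
  std::rotate(entries.begin(), entries.begin() + cursor, entries.end());
  cursor = entries.size();
  for (int e : entries) std::printf("%d ", e);  // Prints: 1 2 3 4 5
  std::printf("\n");
  return 0;
}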
// Collects instructions that can cause effects that invalidate information
// needed for check elimination.
class HCheckMapsEffects : public ZoneObject {
public:
explicit HCheckMapsEffects(Zone* zone) : objects_(0, zone) { }
// Effects are _not_ disabled.
inline bool Disabled() const { return false; }
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
switch (instr->opcode()) {
case HValue::kStoreNamedField: {
HStoreNamedField* store = HStoreNamedField::cast(instr);
if (store->access().IsMap() || store->has_transition()) {
objects_.Add(store->object(), zone);
}
break;
}
case HValue::kTransitionElementsKind: {
objects_.Add(HTransitionElementsKind::cast(instr)->object(), zone);
break;
}
default: {
flags_.Add(instr->ChangesFlags());
break;
}
}
}
// Apply these effects to the given check elimination table.
void Apply(HCheckTable* table) {
if (flags_.Contains(kOsrEntries)) {
// Uncontrollable map modifications; kill everything.
table->Kill();
return;
}
// Kill all unstable entries.
if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
table->KillUnstableEntries();
}
// Kill maps for each object contained in these effects.
for (int i = 0; i < objects_.length(); ++i) {
table->Kill(objects_[i]->ActualValue());
}
}
// Union these effects with the other effects.
void Union(HCheckMapsEffects* that, Zone* zone) {
flags_.Add(that->flags_);
for (int i = 0; i < that->objects_.length(); ++i) {
objects_.Add(that->objects_[i], zone);
}
}
private:
ZoneList<HValue*> objects_;
GVNFlagSet flags_;
};
// The main routine of the analysis phase. Use the HFlowEngine for either a
// local or a global analysis.
void HCheckEliminationPhase::Run() {
HFlowEngine<HCheckTable, HCheckMapsEffects> engine(graph(), zone());
HCheckTable* table = new(zone()) HCheckTable(this);
if (GLOBAL) {
// Perform a global analysis.
engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
} else {
// Perform only local analysis.
for (int i = 0; i < graph()->blocks()->length(); i++) {
table->Kill();
engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
}
}
if (FLAG_trace_check_elimination) PrintStats();
}
// Are we eliminated yet?
void HCheckEliminationPhase::PrintStats() {
#if DEBUG
#define PRINT_STAT(x) if (x##_ > 0) PrintF(" %-16s = %2d\n", #x, x##_)
#else
#define PRINT_STAT(x)
#endif
PRINT_STAT(redundant);
PRINT_STAT(removed);
PRINT_STAT(removed_cho);
PRINT_STAT(removed_cit);
PRINT_STAT(narrowed);
PRINT_STAT(loads);
PRINT_STAT(empty);
PRINT_STAT(compares_true);
PRINT_STAT(compares_false);
PRINT_STAT(transitions);
}
} // namespace internal
} // namespace v8
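For orientation, the pass implemented by the file above keeps, per tracked object, the set of maps it may still have, intersects that set at every map check, and deletes checks that cannot narrow it further. A minimal standalone sketch of that narrowing step, assuming integer map ids instead of the real Unique<Map> handles:

// Illustrative sketch only: models the map-set narrowing done by the check
// table above with plain integers ("MapId") instead of V8 map handles.
#include <algorithm>
#include <cstdio>
#include <iterator>
#include <set>

using MapId = int;
using MapSet = std::set<MapId>;

// Intersect what is already known about an object with what a map check
// requires; the check is redundant when it cannot rule anything out.
MapSet Narrow(const MapSet& known, const MapSet& checked, bool* redundant) {
  MapSet result;
  std::set_intersection(known.begin(), known.end(), checked.begin(),
                        checked.end(), std::inserter(result, result.begin()));
  *redundant = (result.size() == known.size());
  return result;
}

int main() {
  MapSet known = {1, 2};  // The object is known to have map 1 or map 2.
  bool redundant = false;
  Narrow(known, {1, 2, 3}, &redundant);                   // Learns nothing new.
  std::printf("first check redundant: %d\n", redundant);  // 1
  MapSet narrowed = Narrow(known, {2}, &redundant);       // Narrows to {2}.
  std::printf("second check redundant: %d, maps left: %zu\n", redundant,
              narrowed.size());                           // 0, 1
  return 0;
}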

src/crankshaft/hydrogen-check-elimination.h
View File

@ -1,74 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
#define V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
// Remove CheckMaps instructions through flow- and branch-sensitive analysis.
class HCheckEliminationPhase : public HPhase {
public:
explicit HCheckEliminationPhase(HGraph* graph)
: HPhase("H_Check Elimination", graph), aliasing_(),
string_maps_(kStringMapsSize, zone()) {
// Compute the set of string maps.
#define ADD_STRING_MAP(type, size, name, Name) \
string_maps_.Add(Unique<Map>::CreateImmovable( \
graph->isolate()->factory()->name##_map()), zone());
STRING_TYPE_LIST(ADD_STRING_MAP)
#undef ADD_STRING_MAP
DCHECK_EQ(kStringMapsSize, string_maps_.size());
#ifdef DEBUG
redundant_ = 0;
removed_ = 0;
removed_cho_ = 0;
removed_cit_ = 0;
narrowed_ = 0;
loads_ = 0;
empty_ = 0;
compares_true_ = 0;
compares_false_ = 0;
transitions_ = 0;
#endif
}
void Run();
friend class HCheckTable;
private:
const UniqueSet<Map>* string_maps() const { return &string_maps_; }
void PrintStats();
HAliasAnalyzer* aliasing_;
#define COUNT(type, size, name, Name) + 1
static const int kStringMapsSize = 0 STRING_TYPE_LIST(COUNT);
#undef COUNT
UniqueSet<Map> string_maps_;
#ifdef DEBUG
int redundant_;
int removed_;
int removed_cho_;
int removed_cit_;
int narrowed_;
int loads_;
int empty_;
int compares_true_;
int compares_false_;
int transitions_;
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
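One detail worth calling out in the header above: kStringMapsSize is computed by expanding the same X-macro list twice, once to count its entries and once to populate the set. A tiny generic sketch of the counting trick, using a made-up FRUIT_LIST rather than the real STRING_TYPE_LIST:

// Counting X-macro sketch; FRUIT_LIST is a placeholder list, not a V8 macro,
// and its entries take a single argument instead of the four used above.
#include <cstdio>

#define FRUIT_LIST(V) \
  V(Apple)            \
  V(Banana)           \
  V(Cherry)

#define COUNT(Name) +1
static const int kFruitCount = 0 FRUIT_LIST(COUNT);  // Expands to 0 +1 +1 +1.
#undef COUNT

int main() {
  std::printf("%d\n", kFruitCount);  // Prints: 3
  return 0;
}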

src/crankshaft/hydrogen-dce.cc
View File

@ -1,106 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-dce.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HDeadCodeEliminationPhase::MarkLive(
HValue* instr, ZoneList<HValue*>* worklist) {
if (instr->CheckFlag(HValue::kIsLive)) return; // Already live.
if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr);
// Transitively mark all inputs of live instructions live.
worklist->Add(instr, zone());
while (!worklist->is_empty()) {
HValue* instr = worklist->RemoveLast();
instr->SetFlag(HValue::kIsLive);
for (int i = 0; i < instr->OperandCount(); ++i) {
HValue* input = instr->OperandAt(i);
if (!input->CheckFlag(HValue::kIsLive)) {
input->SetFlag(HValue::kIsLive);
worklist->Add(input, zone());
if (FLAG_trace_dead_code_elimination) PrintLive(instr, input);
}
}
}
}
void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
AllowHandleDereference allow_deref;
OFStream os(stdout);
os << "[MarkLive ";
if (ref != NULL) {
os << *ref;
} else {
os << "root";
}
os << " -> " << *instr << "]" << std::endl;
}
void HDeadCodeEliminationPhase::MarkLiveInstructions() {
ZoneList<HValue*> worklist(10, zone());
// Transitively mark all live instructions, starting from roots.
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (instr->CannotBeEliminated()) MarkLive(instr, &worklist);
}
for (int j = 0; j < block->phis()->length(); j++) {
HPhi* phi = block->phis()->at(j);
if (phi->CannotBeEliminated()) MarkLive(phi, &worklist);
}
}
DCHECK(worklist.is_empty()); // Should have processed everything.
}
void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
ZoneList<HPhi*> worklist(graph()->blocks()->length(), zone());
// Remove any instruction not marked kIsLive.
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (!instr->CheckFlag(HValue::kIsLive)) {
// Instruction has not been marked live, so remove it.
instr->DeleteAndReplaceWith(NULL);
} else {
// Clear the liveness flag to leave the graph clean for the next DCE.
instr->ClearFlag(HValue::kIsLive);
}
}
// Collect phis that are dead and remove them in the next pass.
for (int j = 0; j < block->phis()->length(); j++) {
HPhi* phi = block->phis()->at(j);
if (!phi->CheckFlag(HValue::kIsLive)) {
worklist.Add(phi, zone());
} else {
phi->ClearFlag(HValue::kIsLive);
}
}
}
// Process phis separately to avoid simultaneously mutating the phi list.
while (!worklist.is_empty()) {
HPhi* phi = worklist.RemoveLast();
HBasicBlock* block = phi->block();
phi->DeleteAndReplaceWith(NULL);
if (phi->HasMergedIndex()) {
block->RecordDeletedPhi(phi->merged_index());
}
}
}
} // namespace internal
} // namespace v8
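The pass above is a classic two-phase dead-code elimination: seed a worklist with instructions that cannot be removed, transitively mark their operands live, then delete everything left unmarked. A self-contained sketch of the mark phase over a made-up operand graph (the instruction ids and edges are invented):

// Worklist-based "mark live" sketch; the graph here is hypothetical.
#include <cstdio>
#include <vector>

int main() {
  // operands[i] lists the inputs of instruction i.
  std::vector<std::vector<int>> operands = {
      {},   // 0: constant
      {0},  // 1: uses 0
      {0},  // 2: uses 0, but nothing observable uses 2
      {1},  // 3: side-effecting root, uses 1
  };
  std::vector<bool> live(operands.size(), false);
  std::vector<int> worklist = {3};  // Roots: instructions that must stay.
  live[3] = true;
  while (!worklist.empty()) {
    int instr = worklist.back();
    worklist.pop_back();
    for (int input : operands[instr]) {
      if (!live[input]) {  // Transitively mark inputs of live instructions.
        live[input] = true;
        worklist.push_back(input);
      }
    }
  }
  for (size_t i = 0; i < live.size(); ++i) {
    std::printf("#%zu is %s\n", i, live[i] ? "live" : "dead");
  }
  // Output: 0 live, 1 live, 2 dead, 3 live.
  return 0;
}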

src/crankshaft/hydrogen-dce.h
View File

@ -1,35 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_DCE_H_
#define V8_CRANKSHAFT_HYDROGEN_DCE_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HDeadCodeEliminationPhase : public HPhase {
public:
explicit HDeadCodeEliminationPhase(HGraph* graph)
: HPhase("H_Dead code elimination", graph) { }
void Run() {
MarkLiveInstructions();
RemoveDeadInstructions();
}
private:
void MarkLive(HValue* instr, ZoneList<HValue*>* worklist);
void PrintLive(HValue* ref, HValue* instr);
void MarkLiveInstructions();
void RemoveDeadInstructions();
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_DCE_H_

src/crankshaft/hydrogen-dehoist.cc
View File

@ -1,73 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-dehoist.h"
#include "src/base/safe_math.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
HValue* index = array_operation->GetKey()->ActualValue();
if (!index->representation().IsSmiOrInteger32()) return;
if (!index->IsAdd() && !index->IsSub()) return;
HConstant* constant;
HValue* subexpression;
HBinaryOperation* binary_operation = HBinaryOperation::cast(index);
if (binary_operation->left()->IsConstant() && index->IsAdd()) {
subexpression = binary_operation->right();
constant = HConstant::cast(binary_operation->left());
} else if (binary_operation->right()->IsConstant()) {
subexpression = binary_operation->left();
constant = HConstant::cast(binary_operation->right());
} else {
return;
}
if (!constant->HasInteger32Value()) return;
v8::base::internal::CheckedNumeric<int32_t> checked_value =
constant->Integer32Value();
int32_t sign = binary_operation->IsSub() ? -1 : 1;
checked_value = checked_value * sign;
// Multiply value by elements size, bailing out on overflow.
int32_t elements_kind_size =
1 << ElementsKindToShiftSize(array_operation->elements_kind());
checked_value = checked_value * elements_kind_size;
if (!checked_value.IsValid()) return;
int32_t value = checked_value.ValueOrDie();
if (value < 0) return;
// Ensure that the array operation can add value to existing base offset
// without overflowing.
if (!array_operation->TryIncreaseBaseOffset(value)) return;
array_operation->SetKey(subexpression);
if (binary_operation->HasNoUses()) {
binary_operation->DeleteAndReplaceWith(NULL);
}
array_operation->SetDehoisted(true);
}
void HDehoistIndexComputationsPhase::Run() {
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
for (int i = 0; i < blocks->length(); ++i) {
for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (instr->IsLoadKeyed()) {
DehoistArrayIndex(HLoadKeyed::cast(instr));
} else if (instr->IsStoreKeyed()) {
DehoistArrayIndex(HStoreKeyed::cast(instr));
}
}
}
}
} // namespace internal
} // namespace v8
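For context on the arithmetic above: dehoisting folds an index of the form base + constant into the access's base offset, after scaling the constant by the element size and bailing out on overflow or negative results. A small sketch of that overflow-checked scaling, using a widened multiply in place of base::CheckedNumeric (the element sizes and constants are example values):

// Dehoisting arithmetic sketch; element size and offsets are example values.
#include <cstdint>
#include <cstdio>
#include <limits>

// Returns true and writes the scaled offset if constant * element_size fits
// in a non-negative int32_t; otherwise the transformation must bail out.
bool ScaleIndexConstant(int32_t constant, int32_t element_size,
                        int32_t* scaled) {
  int64_t wide = static_cast<int64_t>(constant) * element_size;
  if (wide < 0 || wide > std::numeric_limits<int32_t>::max()) return false;
  *scaled = static_cast<int32_t>(wide);
  return true;
}

int main() {
  int32_t scaled = 0;
  // a[i + 4] on an 8-byte element array: fold 4 * 8 = 32 into the base offset.
  if (ScaleIndexConstant(4, 8, &scaled)) std::printf("offset += %d\n", scaled);
  // A huge constant overflows int32 and the index is left untouched.
  if (!ScaleIndexConstant(1 << 30, 8, &scaled)) std::printf("bail out\n");
  return 0;
}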

src/crankshaft/hydrogen-dehoist.h
View File

@ -1,29 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
#define V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HDehoistIndexComputationsPhase : public HPhase {
public:
explicit HDehoistIndexComputationsPhase(HGraph* graph)
: HPhase("H_Dehoist index computations", graph) { }
void Run();
private:
DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_

src/crankshaft/hydrogen-environment-liveness.cc
View File

@ -1,229 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-environment-liveness.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase(
HGraph* graph)
: HPhase("H_Environment liveness analysis", graph),
block_count_(graph->blocks()->length()),
maximum_environment_size_(graph->maximum_environment_size()),
live_at_block_start_(block_count_, zone()),
first_simulate_(block_count_, zone()),
first_simulate_invalid_for_index_(block_count_, zone()),
markers_(maximum_environment_size_, zone()),
collect_markers_(true),
last_simulate_(NULL),
went_live_since_last_simulate_(maximum_environment_size_, zone()) {
DCHECK(maximum_environment_size_ > 0);
for (int i = 0; i < block_count_; ++i) {
live_at_block_start_.Add(
new(zone()) BitVector(maximum_environment_size_, zone()), zone());
first_simulate_.Add(NULL, zone());
first_simulate_invalid_for_index_.Add(
new(zone()) BitVector(maximum_environment_size_, zone()), zone());
}
}
void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
int index, HSimulate* simulate) {
int operand_index = simulate->ToOperandIndex(index);
if (operand_index == -1) {
simulate->AddAssignedValue(index, graph()->GetConstantOptimizedOut());
} else {
simulate->SetOperandAt(operand_index, graph()->GetConstantOptimizedOut());
}
}
void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
HBasicBlock* block, BitVector* live) {
// When a value is live in successor A but dead in B, we must
// explicitly zap it in B.
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
HBasicBlock* successor = it.Current();
int successor_id = successor->block_id();
BitVector* live_in_successor = live_at_block_start_[successor_id];
if (live_in_successor->Equals(*live)) continue;
for (int i = 0; i < live->length(); ++i) {
if (!live->Contains(i)) continue;
if (live_in_successor->Contains(i)) continue;
if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) {
continue;
}
HSimulate* simulate = first_simulate_.at(successor_id);
if (simulate == NULL) continue;
DCHECK(VerifyClosures(simulate->closure(),
block->last_environment()->closure()));
ZapEnvironmentSlot(i, simulate);
}
}
}
void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
HEnvironmentMarker* marker) {
if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
HSimulate* simulate = marker->next_simulate();
if (simulate != NULL) {
DCHECK(VerifyClosures(simulate->closure(), marker->closure()));
ZapEnvironmentSlot(marker->index(), simulate);
}
}
void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd(
HBasicBlock* block,
BitVector* live) {
// Liveness at the end of each block: union of liveness in successors.
live->Clear();
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
live->Union(*live_at_block_start_[it.Current()->block_id()]);
}
}
void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
HInstruction* instr,
BitVector* live) {
switch (instr->opcode()) {
case HValue::kEnvironmentMarker: {
HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr);
int index = marker->index();
if (!live->Contains(index)) {
marker->SetFlag(HValue::kEndsLiveRange);
} else {
marker->ClearFlag(HValue::kEndsLiveRange);
}
if (!went_live_since_last_simulate_.Contains(index)) {
marker->set_next_simulate(last_simulate_);
}
if (marker->kind() == HEnvironmentMarker::LOOKUP) {
live->Add(index);
} else {
DCHECK(marker->kind() == HEnvironmentMarker::BIND);
live->Remove(index);
went_live_since_last_simulate_.Add(index);
}
if (collect_markers_) {
// Populate |markers_| list during the first pass.
markers_.Add(marker, zone());
}
break;
}
case HValue::kLeaveInlined:
// No environment values are live at the end of an inlined section.
live->Clear();
last_simulate_ = NULL;
// The following DCHECKs guard the assumption used in case
// kEnterInlined below:
DCHECK(instr->next()->IsSimulate());
DCHECK(instr->next()->next()->IsGoto());
break;
case HValue::kEnterInlined: {
// Those environment values are live that are live at any return
// target block. Here we make use of the fact that the end of an
// inline sequence always looks like this: HLeaveInlined, HSimulate,
// HGoto (to return_target block), with no environment lookups in
// between (see DCHECKs above).
HEnterInlined* enter = HEnterInlined::cast(instr);
live->Clear();
for (int i = 0; i < enter->return_targets()->length(); ++i) {
int return_id = enter->return_targets()->at(i)->block_id();
live->Union(*live_at_block_start_[return_id]);
}
last_simulate_ = NULL;
break;
}
case HValue::kSimulate:
last_simulate_ = HSimulate::cast(instr);
went_live_since_last_simulate_.Clear();
break;
default:
break;
}
}
void HEnvironmentLivenessAnalysisPhase::Run() {
DCHECK(maximum_environment_size_ > 0);
// Main iteration. Compute liveness of environment slots, and store it
// for each block until it doesn't change any more. For efficiency, visit
// blocks in reverse order and walk backwards through each block. We
// need several iterations to propagate liveness through nested loops.
BitVector live(maximum_environment_size_, zone());
BitVector worklist(block_count_, zone());
for (int i = 0; i < block_count_; ++i) {
worklist.Add(i);
}
while (!worklist.IsEmpty()) {
for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
if (!worklist.Contains(block_id)) {
continue;
}
worklist.Remove(block_id);
last_simulate_ = NULL;
HBasicBlock* block = graph()->blocks()->at(block_id);
UpdateLivenessAtBlockEnd(block, &live);
for (HInstruction* instr = block->end(); instr != NULL;
instr = instr->previous()) {
UpdateLivenessAtInstruction(instr, &live);
}
// Reached the start of the block, do necessary bookkeeping:
// store computed information for this block and add predecessors
// to the work list as necessary.
first_simulate_.Set(block_id, last_simulate_);
first_simulate_invalid_for_index_[block_id]->CopyFrom(
went_live_since_last_simulate_);
if (live_at_block_start_[block_id]->UnionIsChanged(live)) {
for (int i = 0; i < block->predecessors()->length(); ++i) {
worklist.Add(block->predecessors()->at(i)->block_id());
}
}
}
// Only collect bind/lookup instructions during the first pass.
collect_markers_ = false;
}
// Analysis finished. Zap dead environment slots.
for (int i = 0; i < markers_.length(); ++i) {
ZapEnvironmentSlotsForInstruction(markers_[i]);
}
for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
HBasicBlock* block = graph()->blocks()->at(block_id);
UpdateLivenessAtBlockEnd(block, &live);
ZapEnvironmentSlotsInSuccessors(block, &live);
}
// Finally, remove the HEnvironment{Bind,Lookup} markers.
for (int i = 0; i < markers_.length(); ++i) {
markers_[i]->DeleteAndReplaceWith(NULL);
}
}
#ifdef DEBUG
bool HEnvironmentLivenessAnalysisPhase::VerifyClosures(
Handle<JSFunction> a, Handle<JSFunction> b) {
base::LockGuard<base::Mutex> guard(isolate()->heap()->relocation_mutex());
AllowHandleDereference for_verification;
return a.is_identical_to(b);
}
#endif
} // namespace internal
} // namespace v8
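The driver above walks blocks backwards and re-queues predecessors whenever a block's live-in set grows, i.e. a standard backward data-flow fixpoint over bit vectors. A compact standalone sketch of that fixpoint shape (the three-block CFG and the gen/kill sets are invented for the example):

// Backward liveness fixpoint sketch over a tiny hand-built CFG; block ids,
// edges and gen/kill sets are invented and unrelated to the graph above.
#include <bitset>
#include <cstdio>
#include <vector>

struct Block {
  std::vector<int> successors;
  std::bitset<4> gen;   // Slots read before being written in the block.
  std::bitset<4> kill;  // Slots written in the block.
};

int main() {
  // B0 -> B1, B1 -> {B1, B2} (self loop), B2 exits.  Bit i represents slot i.
  std::vector<Block> blocks = {
      {{1}, 0b0000, 0b0001},
      {{1, 2}, 0b0001, 0b0010},
      {{}, 0b0010, 0b0000},
  };
  std::vector<std::bitset<4>> live_in(blocks.size());
  bool changed = true;
  while (changed) {  // Iterate to a fixed point; the loop edge needs a rerun.
    changed = false;
    for (int b = static_cast<int>(blocks.size()) - 1; b >= 0; --b) {
      std::bitset<4> live_out;
      for (int succ : blocks[b].successors) live_out |= live_in[succ];
      std::bitset<4> in = (live_out & ~blocks[b].kill) | blocks[b].gen;
      if (in != live_in[b]) {
        live_in[b] = in;
        changed = true;
      }
    }
  }
  for (size_t b = 0; b < blocks.size(); ++b) {
    std::printf("B%zu live-in: %s\n", b, live_in[b].to_string().c_str());
  }
  return 0;
}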

src/crankshaft/hydrogen-environment-liveness.h
View File

@ -1,68 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
#define V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
// Trims live ranges of environment slots by doing explicit liveness analysis.
// Values in the environment are kept alive by every subsequent LInstruction
// that is assigned an LEnvironment, which creates register pressure and
// unnecessary spill slot moves. Therefore it is beneficial to trim the
// live ranges of environment slots by zapping them with a constant after
// the last lookup that refers to them.
// Slots are identified by their index and only affected if whitelisted in
// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
class HEnvironmentLivenessAnalysisPhase : public HPhase {
public:
explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph);
void Run();
private:
void ZapEnvironmentSlot(int index, HSimulate* simulate);
void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live);
void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
#ifdef DEBUG
bool VerifyClosures(Handle<JSFunction> a, Handle<JSFunction> b);
#endif
int block_count_;
// Largest number of local variables in any environment in the graph
// (including inlined environments).
int maximum_environment_size_;
// Per-block data. All these lists are indexed by block_id.
ZoneList<BitVector*> live_at_block_start_;
ZoneList<HSimulate*> first_simulate_;
ZoneList<BitVector*> first_simulate_invalid_for_index_;
// List of all HEnvironmentMarker instructions for quick iteration/deletion.
// It is populated during the first pass over the graph, controlled by
// |collect_markers_|.
ZoneList<HEnvironmentMarker*> markers_;
bool collect_markers_;
// Keeps track of the last simulate seen, as well as the environment slots
// for which a new live range has started since (so they must not be zapped
// in that simulate when the end of another live range of theirs is found).
HSimulate* last_simulate_;
BitVector went_live_since_last_simulate_;
DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_

src/crankshaft/hydrogen-escape-analysis.cc
View File

@ -1,327 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-escape-analysis.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) {
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->HasEscapingOperandAt(it.index())) {
if (FLAG_trace_escape_analysis) {
PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
}
return false;
}
if (use->HasOutOfBoundsAccess(size)) {
if (FLAG_trace_escape_analysis) {
PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(),
value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
}
return false;
}
int redefined_index = use->RedefinedOperandIndex();
if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) {
if (FLAG_trace_escape_analysis) {
PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
}
return false;
}
}
return true;
}
void HEscapeAnalysisPhase::CollectCapturedValues() {
int block_count = graph()->blocks()->length();
for (int i = 0; i < block_count; ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (!instr->IsAllocate()) continue;
HAllocate* allocate = HAllocate::cast(instr);
if (!allocate->size()->IsInteger32Constant()) continue;
int size_in_bytes = allocate->size()->GetInteger32Constant();
if (HasNoEscapingUses(instr, size_in_bytes)) {
if (FLAG_trace_escape_analysis) {
PrintF("#%d (%s) is being captured\n", instr->id(),
instr->Mnemonic());
}
captured_.Add(instr, zone());
}
}
}
}
HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
Zone* zone = graph()->zone();
HCapturedObject* state =
new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone);
state->InsertAfter(previous);
return state;
}
// Create a new state for replacing HAllocate instructions.
HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
HInstruction* previous) {
HConstant* undefined = graph()->GetConstantUndefined();
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
state->SetOperandAt(index, undefined);
}
return state;
}
// Create a new state full of phis for loop header entries.
HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
HInstruction* previous,
HCapturedObject* old_state) {
HBasicBlock* block = previous->block();
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = old_state->OperandAt(index);
HPhi* phi = NewPhiAndInsert(block, operand, index);
state->SetOperandAt(index, phi);
}
return state;
}
// Create a new state by copying an existing one.
HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
HInstruction* previous,
HCapturedObject* old_state) {
HCapturedObject* state = NewState(previous);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = old_state->OperandAt(index);
state->SetOperandAt(index, operand);
}
return state;
}
// Insert a newly created phi into the given block and fill all incoming
// edges with the given value.
HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
HValue* incoming_value,
int index) {
Zone* zone = graph()->zone();
HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
for (int i = 0; i < block->predecessors()->length(); i++) {
phi->AddInput(incoming_value);
}
block->AddPhi(phi);
return phi;
}
// Insert a newly created value check as a replacement for map checks.
HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
HCheckMaps* mapcheck) {
Zone* zone = graph()->zone();
HValue* value = state->map_value();
// TODO(mstarzinger): This will narrow a map check against a set of maps
// down to the first element in the set. Revisit and fix this.
HCheckValue* check = HCheckValue::New(graph()->isolate(), zone, NULL, value,
mapcheck->maps()->at(0), false);
check->InsertBefore(mapcheck);
return check;
}
// Replace a field load with a given value, forcing Smi representation if
// necessary.
HValue* HEscapeAnalysisPhase::NewLoadReplacement(
HLoadNamedField* load, HValue* load_value) {
HValue* replacement = load_value;
Representation representation = load->representation();
if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
Zone* zone = graph()->zone();
HInstruction* new_instr = HForceRepresentation::New(
graph()->isolate(), zone, NULL, load_value, representation);
new_instr->InsertAfter(load);
replacement = new_instr;
}
return replacement;
}
// Performs a forward data-flow analysis of all loads and stores on the
// given captured allocation. This uses a reverse post-order iteration
// over affected basic blocks. All non-escaping instructions are handled
// and replaced during the analysis.
void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
HBasicBlock* allocate_block = allocate->block();
block_states_.AddBlock(NULL, graph()->blocks()->length(), zone());
// Iterate all blocks starting with the allocation block, since the
// allocation cannot dominate blocks that come before.
int start = allocate_block->block_id();
for (int i = start; i < graph()->blocks()->length(); i++) {
HBasicBlock* block = graph()->blocks()->at(i);
HCapturedObject* state = StateAt(block);
// Skip blocks that are not dominated by the captured allocation.
if (!allocate_block->Dominates(block) && allocate_block != block) continue;
if (FLAG_trace_escape_analysis) {
PrintF("Analyzing data-flow in B%d\n", block->block_id());
}
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
switch (instr->opcode()) {
case HValue::kAllocate: {
if (instr != allocate) continue;
state = NewStateForAllocation(allocate);
break;
}
case HValue::kLoadNamedField: {
HLoadNamedField* load = HLoadNamedField::cast(instr);
int index = load->access().offset() / kPointerSize;
if (load->object() != allocate) continue;
DCHECK(load->access().IsInobject());
HValue* replacement =
NewLoadReplacement(load, state->OperandAt(index));
load->DeleteAndReplaceWith(replacement);
if (FLAG_trace_escape_analysis) {
PrintF("Replacing load #%d with #%d (%s)\n", load->id(),
replacement->id(), replacement->Mnemonic());
}
break;
}
case HValue::kStoreNamedField: {
HStoreNamedField* store = HStoreNamedField::cast(instr);
int index = store->access().offset() / kPointerSize;
if (store->object() != allocate) continue;
DCHECK(store->access().IsInobject());
state = NewStateCopy(store->previous(), state);
state->SetOperandAt(index, store->value());
if (store->has_transition()) {
state->SetOperandAt(0, store->transition());
}
if (store->HasObservableSideEffects()) {
state->ReuseSideEffectsFromStore(store);
}
store->DeleteAndReplaceWith(store->ActualValue());
if (FLAG_trace_escape_analysis) {
PrintF("Replacing store #%d%s\n", instr->id(),
store->has_transition() ? " (with transition)" : "");
}
break;
}
case HValue::kArgumentsObject:
case HValue::kCapturedObject:
case HValue::kSimulate: {
for (int i = 0; i < instr->OperandCount(); i++) {
if (instr->OperandAt(i) != allocate) continue;
instr->SetOperandAt(i, state);
}
break;
}
case HValue::kCheckHeapObject: {
HCheckHeapObject* check = HCheckHeapObject::cast(instr);
if (check->value() != allocate) continue;
check->DeleteAndReplaceWith(check->ActualValue());
break;
}
case HValue::kCheckMaps: {
HCheckMaps* mapcheck = HCheckMaps::cast(instr);
if (mapcheck->value() != allocate) continue;
NewMapCheckAndInsert(state, mapcheck);
mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
break;
}
default:
// Nothing to see here, move along ...
break;
}
}
// Propagate the block state forward to all successor blocks.
for (int i = 0; i < block->end()->SuccessorCount(); i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
if (!allocate_block->Dominates(succ)) continue;
if (succ->predecessors()->length() == 1) {
// Case 1: This is the only predecessor, just reuse state.
SetStateAt(succ, state);
} else if (StateAt(succ) == NULL && succ->IsLoopHeader()) {
// Case 2: This is a state that enters a loop header, be
// pessimistic about loop headers, add phis for all values.
SetStateAt(succ, NewStateForLoopHeader(succ->first(), state));
} else if (StateAt(succ) == NULL) {
// Case 3: This is the first state propagated forward to the
// successor, leave a copy of the current state.
SetStateAt(succ, NewStateCopy(succ->first(), state));
} else {
// Case 4: This is a state that needs merging with previously
// propagated states, potentially introducing new phis lazily or
// adding values to existing phis.
HCapturedObject* succ_state = StateAt(succ);
for (int index = 0; index < number_of_values_; index++) {
HValue* operand = state->OperandAt(index);
HValue* succ_operand = succ_state->OperandAt(index);
if (succ_operand->IsPhi() && succ_operand->block() == succ) {
// Phi already exists, add operand.
HPhi* phi = HPhi::cast(succ_operand);
phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
} else if (succ_operand != operand) {
// Phi does not exist, introduce one.
HPhi* phi = NewPhiAndInsert(succ, succ_operand, index);
phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
succ_state->SetOperandAt(index, phi);
}
}
}
}
}
// All uses have been handled.
DCHECK(allocate->HasNoUses());
allocate->DeleteAndReplaceWith(NULL);
}
void HEscapeAnalysisPhase::PerformScalarReplacement() {
for (int i = 0; i < captured_.length(); i++) {
HAllocate* allocate = HAllocate::cast(captured_.at(i));
// Compute number of scalar values and start with clean slate.
int size_in_bytes = allocate->size()->GetInteger32Constant();
number_of_values_ = size_in_bytes / kPointerSize;
number_of_objects_++;
block_states_.Rewind(0);
// Perform actual analysis step.
AnalyzeDataFlow(allocate);
cumulative_values_ += number_of_values_;
DCHECK(allocate->HasNoUses());
DCHECK(!allocate->IsLinked());
}
}
void HEscapeAnalysisPhase::Run() {
int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
for (int i = 0; i < max_fixpoint_iteration_count; i++) {
CollectCapturedValues();
if (captured_.is_empty()) break;
PerformScalarReplacement();
captured_.Rewind(0);
}
}
} // namespace internal
} // namespace v8
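At its core, the analysis above asks one question per allocation: does any use let the object escape, for example a return or a call argument? If not, the allocation is "captured" and its fields are replaced by plain values. A toy version of that escape test, over an invented use-kind enum rather than the Hydrogen use list:

// Escape-test sketch; the use kinds below are a made-up abstraction of the
// HasEscapingOperandAt() checks in the pass above.
#include <cstdio>
#include <vector>

enum class UseKind {
  kLoadOwnField,  // Reads a field of the allocation.
  kStoreIntoOwn,  // Writes a value into a field of the allocation.
  kReturn,        // Returns the allocation itself.
  kCallArgument,  // Passes the allocation to a call.
};

// Scalar replacement is only safe when every use stays within the object.
bool HasNoEscapingUses(const std::vector<UseKind>& uses) {
  for (UseKind use : uses) {
    if (use == UseKind::kReturn || use == UseKind::kCallArgument) return false;
  }
  return true;
}

int main() {
  std::vector<UseKind> local = {UseKind::kStoreIntoOwn, UseKind::kLoadOwnField};
  std::vector<UseKind> leaked = {UseKind::kStoreIntoOwn, UseKind::kReturn};
  std::printf("local:  %s\n", HasNoEscapingUses(local) ? "captured" : "escapes");
  std::printf("leaked: %s\n", HasNoEscapingUses(leaked) ? "captured" : "escapes");
  return 0;
}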

src/crankshaft/hydrogen-escape-analysis.h
View File

@ -1,71 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
#define V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
#include "src/allocation.h"
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HEscapeAnalysisPhase : public HPhase {
public:
explicit HEscapeAnalysisPhase(HGraph* graph)
: HPhase("H_Escape analysis", graph),
captured_(0, zone()),
number_of_objects_(0),
number_of_values_(0),
cumulative_values_(0),
block_states_(graph->blocks()->length(), zone()) { }
void Run();
private:
void CollectCapturedValues();
bool HasNoEscapingUses(HValue* value, int size);
void PerformScalarReplacement();
void AnalyzeDataFlow(HInstruction* instr);
HCapturedObject* NewState(HInstruction* prev);
HCapturedObject* NewStateForAllocation(HInstruction* prev);
HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*);
HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state);
HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
HValue* NewLoadReplacement(HLoadNamedField* load, HValue* load_value);
HCapturedObject* StateAt(HBasicBlock* block) {
return block_states_.at(block->block_id());
}
void SetStateAt(HBasicBlock* block, HCapturedObject* state) {
block_states_.Set(block->block_id(), state);
}
// List of allocations captured during collection phase.
ZoneList<HInstruction*> captured_;
// Number of captured objects on which scalar replacement was done.
int number_of_objects_;
// Number of scalar values tracked during scalar replacement phase.
int number_of_values_;
int cumulative_values_;
// Map of block IDs to the data-flow state at block entry during the
// scalar replacement phase.
ZoneList<HCapturedObject*> block_states_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_

src/crankshaft/hydrogen-flow-engine.h
View File

@ -1,220 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/crankshaft/hydrogen.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
// An example implementation of effects that doesn't collect anything.
class NoEffects : public ZoneObject {
public:
explicit NoEffects(Zone* zone) { }
inline bool Disabled() {
return true; // Nothing to do.
}
template <class State>
inline void Apply(State* state) {
// do nothing.
}
inline void Process(HInstruction* value, Zone* zone) {
// do nothing.
}
inline void Union(NoEffects* other, Zone* zone) {
// do nothing.
}
};
// An example implementation of state that doesn't track anything.
class NoState {
public:
inline NoState* Copy(HBasicBlock* succ, Zone* zone) {
return this;
}
inline NoState* Process(HInstruction* value, Zone* zone) {
return this;
}
inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) {
return this;
}
};
// This class implements an engine that can drive flow-sensitive analyses
// over a graph of basic blocks, either one block at a time (local analysis)
// or over the entire graph (global analysis). The flow engine is parameterized
// by the type of the state and the effects collected while walking over the
// graph.
//
// The "State" collects which facts are known while passing over instructions
// in control flow order, and the "Effects" collect summary information about
// which facts could be invalidated on other control flow paths. The effects
// are necessary to correctly handle loops in the control flow graph without
// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit
// each block at most twice; once for state, and optionally once for effects.
//
// The flow engine requires the State and Effects classes to implement methods
// like the example NoState and NoEffects above. It's not necessary to provide
// an effects implementation for local analysis.
template <class State, class Effects>
class HFlowEngine {
public:
HFlowEngine(HGraph* graph, Zone* zone)
: graph_(graph),
zone_(zone),
#if DEBUG
pred_counts_(graph->blocks()->length(), zone),
#endif
block_states_(graph->blocks()->length(), zone),
loop_effects_(graph->blocks()->length(), zone) {
loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone);
}
// Local analysis. Iterates over the instructions in the given block.
State* AnalyzeOneBlock(HBasicBlock* block, State* state) {
// Go through all instructions of the current block, updating the state.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
state = state->Process(it.Current(), zone_);
}
return state;
}
// Global analysis. Iterates over all blocks that are dominated by the given
// block, starting with the initial state. Computes effects for nested loops.
void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) {
InitializeStates();
SetStateAt(root, initial);
// Iterate all dominated blocks starting from the given start block.
for (int i = root->block_id(); i < graph_->blocks()->length(); i++) {
HBasicBlock* block = graph_->blocks()->at(i);
// Skip blocks not dominated by the root node.
if (SkipNonDominatedBlock(root, block)) continue;
State* state = State::Finish(StateAt(block), block, zone_);
if (block->IsReachable()) {
DCHECK(state != NULL);
if (block->IsLoopHeader()) {
// Apply loop effects before analyzing loop body.
ComputeLoopEffects(block)->Apply(state);
} else {
// Must have visited all predecessors before this block.
CheckPredecessorCount(block);
}
// Go through all instructions of the current block, updating the state.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
state = state->Process(it.Current(), zone_);
}
}
// Propagate the block state forward to all successor blocks.
int max = block->end()->SuccessorCount();
for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
if (max == 1 && succ->predecessors()->length() == 1) {
// Optimization: successor can inherit this state.
SetStateAt(succ, state);
} else {
// Merge the current state with the state already at the successor.
SetStateAt(succ,
State::Merge(StateAt(succ), succ, state, block, zone_));
}
}
}
}
private:
// Computes and caches the loop effects for the loop which has the given
// block as its loop header.
Effects* ComputeLoopEffects(HBasicBlock* block) {
DCHECK(block->IsLoopHeader());
Effects* effects = loop_effects_[block->block_id()];
if (effects != NULL) return effects; // Already analyzed this loop.
effects = new(zone_) Effects(zone_);
loop_effects_[block->block_id()] = effects;
if (effects->Disabled()) return effects; // No effects for this analysis.
HLoopInformation* loop = block->loop_information();
int end = loop->GetLastBackEdge()->block_id();
// Process the blocks between the header and the end.
for (int i = block->block_id(); i <= end; i++) {
HBasicBlock* member = graph_->blocks()->at(i);
if (i != block->block_id() && member->IsLoopHeader()) {
// Recursively compute and cache the effects of the nested loop.
DCHECK(member->loop_information()->parent_loop() == loop);
Effects* nested = ComputeLoopEffects(member);
effects->Union(nested, zone_);
// Skip the nested loop's blocks.
i = member->loop_information()->GetLastBackEdge()->block_id();
} else {
// Process all the effects of the block.
if (member->IsUnreachable()) continue;
DCHECK(member->current_loop() == loop);
for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
effects->Process(it.Current(), zone_);
}
}
}
return effects;
}
inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) {
if (root->block_id() == 0) return false; // Visit the whole graph.
if (root == other) return false; // Always visit the root.
return !root->Dominates(other); // Only visit dominated blocks.
}
inline State* StateAt(HBasicBlock* block) {
return block_states_.at(block->block_id());
}
inline void SetStateAt(HBasicBlock* block, State* state) {
block_states_.Set(block->block_id(), state);
}
inline void InitializeStates() {
#if DEBUG
pred_counts_.Rewind(0);
pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_);
#endif
block_states_.Rewind(0);
block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_);
}
inline void CheckPredecessorCount(HBasicBlock* block) {
DCHECK(block->predecessors()->length() == pred_counts_[block->block_id()]);
}
inline void IncrementPredecessorCount(HBasicBlock* block) {
#if DEBUG
pred_counts_[block->block_id()]++;
#endif
}
HGraph* graph_; // The hydrogen graph.
Zone* zone_; // Temporary zone.
#if DEBUG
ZoneList<int> pred_counts_; // Finished predecessors (by block id).
#endif
ZoneList<State*> block_states_; // Block states (by block id).
ZoneList<Effects*> loop_effects_; // Loop effects (by block id).
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
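NoEffects and NoState above already document the required interface; as a further illustration of how a concrete State/Effects pair divides the work, here is a deliberately tiny analogue in which a loop body is summarized once and the summary is applied at the header before the body is walked. None of these classes or encodings exist in V8; they are assumptions made for the sketch:

// Minimal analogue of the State/Effects plug-in contract used by HFlowEngine.
#include <cstdio>
#include <vector>

// State: the facts known while walking instructions in control-flow order.
// Here it is just a count of instructions whose result we "remember".
struct FactsState {
  int known = 0;
  FactsState* Process(int /*instr*/) { ++known; return this; }
};

// Effects: a summary of what a region (e.g. a loop body) may invalidate.
struct ClobberEffects {
  bool clobbers = false;
  void Process(int instr) { if (instr < 0) clobbers = true; }  // -1 = barrier.
  void Apply(FactsState* state) { if (clobbers) state->known = 0; }
};

int main() {
  std::vector<int> before_loop = {1, 2, 3};
  std::vector<int> loop_body = {4, -1, 5};
  FactsState state;
  for (int instr : before_loop) state.Process(instr);
  ClobberEffects effects;                             // Summarize the loop once,
  for (int instr : loop_body) effects.Process(instr);
  effects.Apply(&state);                              // apply it at the header,
  for (int instr : loop_body) state.Process(instr);   // then walk the body.
  std::printf("facts known after the loop body: %d\n", state.known);  // 3
  return 0;
}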

src/crankshaft/hydrogen-gvn.cc
View File

@ -1,895 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-gvn.h"
#include "src/crankshaft/hydrogen.h"
#include "src/objects-inl.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
class HInstructionMap final : public ZoneObject {
public:
HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
: array_size_(0),
lists_size_(0),
count_(0),
array_(NULL),
lists_(NULL),
free_list_head_(kNil),
side_effects_tracker_(side_effects_tracker) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
void Kill(SideEffects side_effects);
void Add(HInstruction* instr, Zone* zone) {
present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
Insert(instr, zone);
}
HInstruction* Lookup(HInstruction* instr) const;
HInstructionMap* Copy(Zone* zone) const {
return new(zone) HInstructionMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
// A linked list of HInstruction* values. Stored in arrays.
struct HInstructionMapListElement {
HInstruction* instr;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
// Must be a power of 2.
static const int kInitialSize = 16;
HInstructionMap(Zone* zone, const HInstructionMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
void Insert(HInstruction* instr, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
int count_; // The number of values stored in the HInstructionMap.
SideEffects present_depends_on_;
HInstructionMapListElement* array_;
// Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
HInstructionMapListElement* lists_;
// The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
SideEffectsTracker* side_effects_tracker_;
};
class HSideEffectMap final BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
void Kill(SideEffects side_effects);
void Store(SideEffects side_effects, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
inline HInstruction* operator[](int i) const {
DCHECK(0 <= i);
DCHECK(i < kNumberOfTrackedSideEffects);
return data_[i];
}
inline HInstruction* at(int i) const { return operator[](i); }
private:
int count_;
HInstruction* data_[kNumberOfTrackedSideEffects];
};
void TraceGVN(const char* msg, ...) {
va_list arguments;
va_start(arguments, msg);
base::OS::VPrint(msg, arguments);
va_end(arguments);
}
// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
// --trace-gvn is off.
#define TRACE_GVN_1(msg, a1) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1); \
}
#define TRACE_GVN_2(msg, a1, a2) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2); \
}
#define TRACE_GVN_3(msg, a1, a2, a3) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2, a3); \
}
#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2, a3, a4); \
}
#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2, a3, a4, a5); \
}
HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
present_depends_on_(other->present_depends_on_),
array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_),
side_effects_tracker_(other->side_effects_tracker_) {
MemCopy(array_, other->array_,
array_size_ * sizeof(HInstructionMapListElement));
MemCopy(lists_, other->lists_,
lists_size_ * sizeof(HInstructionMapListElement));
}
void HInstructionMap::Kill(SideEffects changes) {
if (!present_depends_on_.ContainsAnyOf(changes)) return;
present_depends_on_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
HInstruction* instr = array_[i].instr;
if (instr != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
HInstruction* instr = lists_[current].instr;
SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
if (depends_on.ContainsAnyOf(changes)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
free_list_head_ = current;
} else {
// Keep it.
lists_[current].next = kept;
kept = current;
present_depends_on_.Add(depends_on);
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
instr = array_[i].instr;
SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
if (depends_on.ContainsAnyOf(changes)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
array_[i].instr = NULL;
} else {
array_[i].instr = lists_[head].instr;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
present_depends_on_.Add(depends_on); // Keep it.
}
}
}
}
HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
uint32_t pos = Bound(hash);
if (array_[pos].instr != NULL) {
if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
int next = array_[pos].next;
while (next != kNil) {
if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
next = lists_[next].next;
}
}
return NULL;
}
void HInstructionMap::Resize(int new_size, Zone* zone) {
DCHECK(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
// Make sure we have at least one free element.
if (free_list_head_ == kNil) {
ResizeLists(lists_size_ << 1, zone);
}
HInstructionMapListElement* new_array =
zone->NewArray<HInstructionMapListElement>(new_size);
memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
HInstructionMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
// Do not modify present_depends_on_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
if (old_array[i].instr != NULL) {
int current = old_array[i].next;
while (current != kNil) {
Insert(lists_[current].instr, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
// Rehash the directly stored instruction.
Insert(old_array[i].instr, zone);
}
}
}
USE(old_count);
DCHECK(count_ == old_count);
}
void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
DCHECK(new_size > lists_size_);
HInstructionMapListElement* new_lists =
zone->NewArray<HInstructionMapListElement>(new_size);
memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
HInstructionMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
MemCopy(lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
free_list_head_ = i;
}
}
void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
DCHECK(instr != NULL);
// Resizing when half of the hashtable is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
DCHECK(count_ < array_size_);
count_++;
uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
if (array_[pos].instr == NULL) {
array_[pos].instr = instr;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
ResizeLists(lists_size_ << 1, zone);
}
int new_element_pos = free_list_head_;
DCHECK(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
lists_[new_element_pos].instr = instr;
lists_[new_element_pos].next = array_[pos].next;
DCHECK(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
array_[pos].next = new_element_pos;
}
}
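A note on the data structure above: collision chains are not heap-allocated nodes but indices into a second flat array (lists_) whose unused slots are threaded onto a free list, so Insert() and Kill() never allocate per element. The free-list bookkeeping in isolation (the node type and values are invented for the example):

// Free-list-in-an-array sketch; mirrors only the lists_/free_list_head_
// bookkeeping of the map above, not its hashing or lookup.
#include <cstdio>
#include <vector>

struct Node {
  int value;
  int next;  // Index of the next node, or kNil for "end of chain".
};

int main() {
  const int kNil = -1;
  std::vector<Node> nodes(4);
  // Thread every slot onto the free list: 0 -> 1 -> 2 -> 3 -> kNil.
  int free_head = 0;
  for (size_t i = 0; i + 1 < nodes.size(); ++i) {
    nodes[i].next = static_cast<int>(i) + 1;
  }
  nodes.back().next = kNil;

  // "Allocate" two nodes from the free list and chain them as one hash bucket.
  int bucket_head = kNil;
  const int values[] = {10, 20};
  for (int value : values) {
    int slot = free_head;        // Pop a slot off the free list.
    free_head = nodes[slot].next;
    nodes[slot].value = value;   // Link it at the head of the bucket chain.
    nodes[slot].next = bucket_head;
    bucket_head = slot;
  }
  for (int i = bucket_head; i != kNil; i = nodes[i].next) {
    std::printf("%d ", nodes[i].value);  // Prints: 20 10
  }
  std::printf("\n");
  return 0;
}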
HSideEffectMap::HSideEffectMap() : count_(0) {
memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
}
HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
*this = *other; // Calls operator=.
}
HSideEffectMap& HSideEffectMap::operator=(const HSideEffectMap& other) {
if (this != &other) {
MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
}
return *this;
}
void HSideEffectMap::Kill(SideEffects side_effects) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
}
}
void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
}
}
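// Compute the side effects produced by {instr}. When the exact global
// property cell or in-object field being written is known, the coarse
// kGlobalVars / kInobjectFields flags are replaced by per-cell / per-field
// "special" bits, so unrelated stores do not conflict.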
SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
int index;
SideEffects result(instr->ChangesFlags());
if (result.ContainsFlag(kGlobalVars)) {
if (instr->IsStoreNamedField()) {
HStoreNamedField* store = HStoreNamedField::cast(instr);
HConstant* target = HConstant::cast(store->object());
if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
&index)) {
result.RemoveFlag(kGlobalVars);
result.AddSpecial(GlobalVar(index));
return result;
}
}
for (index = 0; index < kNumberOfGlobalVars; ++index) {
result.AddSpecial(GlobalVar(index));
}
} else if (result.ContainsFlag(kInobjectFields)) {
if (instr->IsStoreNamedField() &&
ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
result.RemoveFlag(kInobjectFields);
result.AddSpecial(InobjectField(index));
} else {
for (index = 0; index < kNumberOfInobjectFields; ++index) {
result.AddSpecial(InobjectField(index));
}
}
}
return result;
}
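// Same as ComputeChanges, but derived from the DependsOnFlags of {instr},
// i.e. for loads rather than stores.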
SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
int index;
SideEffects result(instr->DependsOnFlags());
if (result.ContainsFlag(kGlobalVars)) {
if (instr->IsLoadNamedField()) {
HLoadNamedField* load = HLoadNamedField::cast(instr);
HConstant* target = HConstant::cast(load->object());
if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
&index)) {
result.RemoveFlag(kGlobalVars);
result.AddSpecial(GlobalVar(index));
return result;
}
}
for (index = 0; index < kNumberOfGlobalVars; ++index) {
result.AddSpecial(GlobalVar(index));
}
} else if (result.ContainsFlag(kInobjectFields)) {
if (instr->IsLoadNamedField() &&
ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
result.RemoveFlag(kInobjectFields);
result.AddSpecial(InobjectField(index));
} else {
for (index = 0; index < kNumberOfInobjectFields; ++index) {
result.AddSpecial(InobjectField(index));
}
}
}
return result;
}
std::ostream& operator<<(std::ostream& os, const TrackedEffects& te) {
SideEffectsTracker* t = te.tracker;
const char* separator = "";
os << "[";
for (int bit = 0; bit < kNumberOfFlags; ++bit) {
GVNFlag flag = GVNFlagFromInt(bit);
if (te.effects.ContainsFlag(flag)) {
os << separator;
separator = ", ";
switch (flag) {
#define DECLARE_FLAG(Type) \
case k##Type: \
os << #Type; \
break;
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
default:
break;
}
}
}
for (int index = 0; index < t->num_global_vars_; ++index) {
if (te.effects.ContainsSpecial(t->GlobalVar(index))) {
os << separator << "[" << *t->global_vars_[index].handle() << "]";
separator = ", ";
}
}
for (int index = 0; index < t->num_inobject_fields_; ++index) {
if (te.effects.ContainsSpecial(t->InobjectField(index))) {
os << separator << t->inobject_fields_[index];
separator = ", ";
}
}
os << "]";
return os;
}
bool SideEffectsTracker::ComputeGlobalVar(Unique<PropertyCell> cell,
int* index) {
for (int i = 0; i < num_global_vars_; ++i) {
if (cell == global_vars_[i]) {
*index = i;
return true;
}
}
if (num_global_vars_ < kNumberOfGlobalVars) {
if (FLAG_trace_gvn) {
OFStream os(stdout);
os << "Tracking global var [" << *cell.handle() << "] "
<< "(mapped to index " << num_global_vars_ << ")" << std::endl;
}
*index = num_global_vars_;
global_vars_[num_global_vars_++] = cell;
return true;
}
return false;
}
bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
int* index) {
for (int i = 0; i < num_inobject_fields_; ++i) {
if (access.Equals(inobject_fields_[i])) {
*index = i;
return true;
}
}
if (num_inobject_fields_ < kNumberOfInobjectFields) {
if (FLAG_trace_gvn) {
OFStream os(stdout);
os << "Tracking inobject field access " << access << " (mapped to index "
<< num_inobject_fields_ << ")" << std::endl;
}
*index = num_inobject_fields_;
inobject_fields_[num_inobject_fields_++] = access;
return true;
}
return false;
}
HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
: HPhase("H_Global value numbering", graph),
removed_side_effects_(false),
block_side_effects_(graph->blocks()->length(), zone()),
loop_side_effects_(graph->blocks()->length(), zone()),
visited_on_paths_(graph->blocks()->length(), zone()) {
DCHECK(!AllowHandleAllocation::IsAllowed());
block_side_effects_.AddBlock(
SideEffects(), graph->blocks()->length(), zone());
loop_side_effects_.AddBlock(
SideEffects(), graph->blocks()->length(), zone());
}
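// Repeat GVN (up to FLAG_gvn_iterations times) as long as the previous pass
// removed side effects, resetting the per-block side-effect data in between.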
void HGlobalValueNumberingPhase::Run() {
DCHECK(!removed_side_effects_);
for (int i = FLAG_gvn_iterations; i > 0; --i) {
// Compute the side effects.
ComputeBlockSideEffects();
// Perform loop invariant code motion if requested.
if (FLAG_loop_invariant_code_motion) LoopInvariantCodeMotion();
// Perform the actual value numbering.
AnalyzeGraph();
// Continue GVN if we removed any side effects.
if (!removed_side_effects_) break;
removed_side_effects_ = false;
// Clear all side effects.
DCHECK_EQ(block_side_effects_.length(), graph()->blocks()->length());
DCHECK_EQ(loop_side_effects_.length(), graph()->blocks()->length());
for (int i = 0; i < graph()->blocks()->length(); ++i) {
block_side_effects_[i].RemoveAll();
loop_side_effects_[i].RemoveAll();
}
visited_on_paths_.Clear();
}
}
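// Accumulate the side effects of every reachable, non-deoptimizing block and
// fold each block's effects into the side effects of all enclosing loops.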
void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph()->blocks()->at(i);
SideEffects side_effects;
if (block->IsReachable() && !block->IsDeoptimizing()) {
int id = block->block_id();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
}
block_side_effects_[id].Add(side_effects);
// Loop headers are part of their loop.
if (block->IsLoopHeader()) {
loop_side_effects_[id].Add(side_effects);
}
// Propagate loop side effects upwards.
if (block->HasParentLoopHeader()) {
HBasicBlock* with_parent = block;
if (block->IsLoopHeader()) side_effects = loop_side_effects_[id];
do {
HBasicBlock* parent_block = with_parent->parent_loop_header();
loop_side_effects_[parent_block->block_id()].Add(side_effects);
with_parent = parent_block;
} while (with_parent->HasParentLoopHeader());
}
}
}
}
void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph()->use_optimistic_licm() ? "yes" : "no");
for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
SideEffects side_effects = loop_side_effects_[block->block_id()];
if (FLAG_trace_gvn) {
OFStream os(stdout);
os << "Try loop invariant motion for " << *block << " changes "
<< Print(side_effects) << std::endl;
}
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
}
}
}
}
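// Try to hoist instructions of {block} into the loop pre-header: a candidate
// must carry the kUseGVN flag, must not depend on anything the loop kills,
// and all of its inputs must be defined before the pre-header.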
void HGlobalValueNumberingPhase::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
SideEffects loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
if (FLAG_trace_gvn) {
OFStream os(stdout);
os << "Loop invariant code motion for " << *block << " depends on "
<< Print(loop_kills) << std::endl;
}
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
if (instr->CheckFlag(HValue::kUseGVN)) {
SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
if (FLAG_trace_gvn) {
OFStream os(stdout);
os << "Checking instruction i" << instr->id() << " ("
<< instr->Mnemonic() << ") changes " << Print(changes)
<< ", depends on " << Print(depends_on) << ". Loop changes "
<< Print(loop_kills) << std::endl;
}
bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
if (can_hoist) {
bool inputs_loop_invariant = true;
for (int i = 0; i < instr->OperandCount(); ++i) {
if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
inputs_loop_invariant = false;
}
}
if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n",
instr->id(), pre_header->block_id());
// Move the instruction out of the loop.
instr->Unlink();
instr->InsertBefore(pre_header->end());
if (instr->HasSideEffects()) removed_side_effects_ = true;
}
}
}
instr = next;
}
}
bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
// If we've disabled code motion or we're in a block that unconditionally
// deoptimizes, don't move any instructions.
return graph()->allow_code_motion() && !instr->block()->IsDeoptimizing() &&
instr->block()->IsReachable();
}
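// Collect the union of the side effects of all blocks on paths between
// {dominator} and {dominated}, using visited_on_paths_ to avoid revisiting
// blocks.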
SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
SideEffects side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
block->block_id() < dominated->block_id() &&
!visited_on_paths_.Contains(block->block_id())) {
visited_on_paths_.Add(block->block_id());
side_effects.Add(block_side_effects_[block->block_id()]);
if (block->IsLoopHeader()) {
side_effects.Add(loop_side_effects_[block->block_id()]);
}
side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
dominator, block));
}
}
return side_effects;
}
// Each instance of this class is like a "stack frame" for the recursive
// traversal of the dominator tree done during GVN (the stack is handled
// as a doubly linked list).
// We reuse frames when possible, so the list length is limited by the depth
// of the dominator tree, but this forces us to initialize each frame by
// calling an explicit "Initialize" method instead of using a constructor.
class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
HInstructionMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
HInstructionMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
Zone* zone,
HBasicBlock** dominator) {
// This assignment needs to happen before calling next_dominated() because
// that call can reuse "this" if we are at the last dominated block.
*dominator = block();
GvnBasicBlockState* result = next_dominated(zone);
if (result == NULL) {
GvnBasicBlockState* dominator_state = pop();
if (dominator_state != NULL) {
// This branch is guaranteed not to return NULL because pop() never
// returns a state where "is_done() == true".
*dominator = dominator_state->block();
result = dominator_state->next_dominated(zone);
} else {
        // Unnecessary (we are returning NULL) but done for cleanliness.
*dominator = NULL;
}
}
return result;
}
private:
void Initialize(HBasicBlock* block,
HInstructionMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
block_ = block;
map_ = copy_map ? map->Copy(zone) : map;
dominated_index_ = -1;
length_ = block->dominated_blocks()->length();
if (dominators != NULL) {
dominators_ = *dominators;
}
}
bool is_done() { return dominated_index_ >= length_; }
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
HInstructionMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
Initialize(block, map, dominators, true, zone);
}
GvnBasicBlockState* next_dominated(Zone* zone) {
dominated_index_++;
if (dominated_index_ == length_ - 1) {
// No need to copy the map for the last child in the dominator tree.
Initialize(block_->dominated_blocks()->at(dominated_index_),
map(),
dominators(),
false,
zone);
return this;
} else if (dominated_index_ < length_) {
return push(zone, block_->dominated_blocks()->at(dominated_index_));
} else {
return NULL;
}
}
GvnBasicBlockState* push(Zone* zone, HBasicBlock* block) {
if (next_ == NULL) {
next_ =
new(zone) GvnBasicBlockState(this, block, map(), dominators(), zone);
} else {
next_->Initialize(block, map(), dominators(), true, zone);
}
return next_;
}
GvnBasicBlockState* pop() {
GvnBasicBlockState* result = previous_;
while (result != NULL && result->is_done()) {
TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
block()->block_id(),
previous_->block()->block_id())
result = result->previous_;
}
return result;
}
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
HInstructionMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
};
// This is a recursive traversal of the dominator tree but it has been turned
// into a loop to avoid stack overflows.
// The logical "stack frames" of the recursion are kept in a list of
// GvnBasicBlockState instances.
void HGlobalValueNumberingPhase::AnalyzeGraph() {
HBasicBlock* entry_block = graph()->entry_block();
HInstructionMap* entry_map =
new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
HInstructionMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
block->block_id(),
block->IsLoopHeader() ? " (loop header)" : "");
// If this is a loop header kill everything killed by the loop.
if (block->IsLoopHeader()) {
map->Kill(loop_side_effects_[block->block_id()]);
dominators->Kill(loop_side_effects_[block->block_id()]);
}
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
GVNFlag flag = GVNFlagFromInt(i);
if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
if (instr->HandleSideEffectDominator(flag, other)) {
removed_side_effects_ = true;
}
}
}
}
// Instruction was unlinked during graph traversal.
if (!instr->IsLinked()) continue;
SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
if (!changes.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
map->Kill(changes);
dominators->Store(changes, instr);
if (FLAG_trace_gvn) {
OFStream os(stdout);
os << "Instruction i" << instr->id() << " changes " << Print(changes)
<< std::endl;
}
}
if (instr->CheckFlag(HValue::kUseGVN) &&
!instr->CheckFlag(HValue::kCantBeReplaced)) {
DCHECK(!instr->HasObservableSideEffects());
HInstruction* other = map->Lookup(instr);
if (other != NULL) {
DCHECK(instr->Equals(other) && other->Equals(instr));
TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
if (instr->HasSideEffects()) removed_side_effects_ = true;
instr->DeleteAndReplaceWith(other);
} else {
map->Add(instr, zone());
}
}
}
HBasicBlock* dominator_block;
GvnBasicBlockState* next =
current->next_in_dominator_tree_traversal(zone(),
&dominator_block);
if (next != NULL) {
HBasicBlock* dominated = next->block();
HInstructionMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
// dominated block. We don't have to traverse these paths if the
// value map and the dominators list is already empty. If the range
// of block ids (block_id, dominated_id) is empty there are no such
// paths.
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
SideEffects side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
successor_dominators->Kill(side_effects_on_all_paths);
}
}
current = next;
}
}
} // namespace internal
} // namespace v8


@ -1,153 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_GVN_H_
#define V8_CRANKSHAFT_HYDROGEN_GVN_H_
#include <iosfwd>
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/crankshaft/hydrogen.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
// This class extends GVNFlagSet with additional "special" dynamic side effects,
// which can be used to represent side effects that cannot be expressed using
// the GVNFlags of an HInstruction. These special side effects are tracked by a
// SideEffectsTracker (see below).
class SideEffects final {
public:
static const int kNumberOfSpecials = 64 - kNumberOfFlags;
SideEffects() : bits_(0) {
DCHECK(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
}
explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
bool IsEmpty() const { return bits_ == 0; }
bool ContainsFlag(GVNFlag flag) const {
return (bits_ & MaskFlag(flag)) != 0;
}
bool ContainsSpecial(int special) const {
return (bits_ & MaskSpecial(special)) != 0;
}
bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
void Add(SideEffects set) { bits_ |= set.bits_; }
void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
void RemoveAll() { bits_ = 0; }
uint64_t ToIntegral() const { return bits_; }
private:
uint64_t MaskFlag(GVNFlag flag) const {
return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
}
uint64_t MaskSpecial(int special) const {
DCHECK(special >= 0);
DCHECK(special < kNumberOfSpecials);
return static_cast<uint64_t>(1) << static_cast<unsigned>(
special + kNumberOfFlags);
}
uint64_t bits_;
};
struct TrackedEffects;
// Tracks global variable and inobject field loads/stores in a fine-grained
// fashion, and represents them using the "special" dynamic side effects of the
// SideEffects class (see above). This way unrelated global variable/inobject
// field stores don't prevent hoisting and merging of global variable/inobject
// field loads.
class SideEffectsTracker final BASE_EMBEDDED {
public:
SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
SideEffects ComputeChanges(HInstruction* instr);
SideEffects ComputeDependsOn(HInstruction* instr);
private:
friend std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
bool ComputeGlobalVar(Unique<PropertyCell> cell, int* index);
bool ComputeInobjectField(HObjectAccess access, int* index);
static int GlobalVar(int index) {
DCHECK(index >= 0);
DCHECK(index < kNumberOfGlobalVars);
return index;
}
static int InobjectField(int index) {
DCHECK(index >= 0);
DCHECK(index < kNumberOfInobjectFields);
return index + kNumberOfGlobalVars;
}
// Track up to four global vars.
static const int kNumberOfGlobalVars = 4;
Unique<PropertyCell> global_vars_[kNumberOfGlobalVars];
int num_global_vars_;
// Track up to n inobject fields.
static const int kNumberOfInobjectFields =
SideEffects::kNumberOfSpecials - kNumberOfGlobalVars;
HObjectAccess inobject_fields_[kNumberOfInobjectFields];
int num_inobject_fields_;
};
// Helper class for printing, because the effects don't know their tracker.
struct TrackedEffects {
TrackedEffects(SideEffectsTracker* t, SideEffects e)
: tracker(t), effects(e) {}
SideEffectsTracker* tracker;
SideEffects effects;
};
std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
// Perform common subexpression elimination and loop-invariant code motion.
class HGlobalValueNumberingPhase final : public HPhase {
public:
explicit HGlobalValueNumberingPhase(HGraph* graph);
void Run();
private:
SideEffects CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
void ComputeBlockSideEffects();
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
SideEffects loop_kills);
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
TrackedEffects Print(SideEffects side_effects) {
return TrackedEffects(&side_effects_tracker_, side_effects);
}
SideEffectsTracker side_effects_tracker_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
ZoneList<SideEffects> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
ZoneList<SideEffects> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
BitVector visited_on_paths_;
DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_GVN_H_


@ -1,163 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-infer-representation.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HInferRepresentationPhase::AddToWorklist(HValue* current) {
if (current->representation().IsTagged()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
worklist_.Add(current, zone());
in_worklist_.Add(current->id());
}
void HInferRepresentationPhase::Run() {
// (1) Initialize bit vectors and count real uses. Each phi gets a
// bit-vector of length <number of phis>.
const ZoneList<HPhi*>* phi_list = graph()->phi_list();
int phi_count = phi_list->length();
ZoneList<BitVector*> connected_phis(phi_count, zone());
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->InitRealUses(i);
BitVector* connected_set = new(zone()) BitVector(phi_count, zone());
connected_set->Add(i);
connected_phis.Add(connected_set, zone());
}
// (2) Do a fixed point iteration to find the set of connected phis. A
// phi is connected to another phi if its value is used either directly or
// indirectly through a transitive closure of the def-use relation.
bool change = true;
while (change) {
change = false;
// We normally have far more "forward edges" than "backward edges",
// so we terminate faster when we walk backwards.
for (int i = phi_count - 1; i >= 0; --i) {
HPhi* phi = phi_list->at(i);
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->IsPhi()) {
int id = HPhi::cast(use)->phi_id();
if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
change = true;
}
}
}
}
// Set truncation flags for groups of connected phis. This is a conservative
// approximation; the flag will be properly re-computed after representations
// have been determined.
if (phi_count > 0) {
BitVector done(phi_count, zone());
for (int i = 0; i < phi_count; ++i) {
if (done.Contains(i)) continue;
// Check if all uses of all connected phis in this group are truncating.
bool all_uses_everywhere_truncating_int32 = true;
bool all_uses_everywhere_truncating_smi = true;
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
all_uses_everywhere_truncating_int32 &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
all_uses_everywhere_truncating_smi &=
phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
done.Add(index);
}
if (!all_uses_everywhere_truncating_int32) {
// Clear truncation flag of this group of connected phis.
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
}
}
if (!all_uses_everywhere_truncating_smi) {
// Clear truncation flag of this group of connected phis.
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
}
}
}
}
// Simplify constant phi inputs where possible.
// This step uses kTruncatingToInt32 flags of phis.
for (int i = 0; i < phi_count; ++i) {
phi_list->at(i)->SimplifyConstantInputs();
}
// Use the phi reachability information from step 2 to
// sum up the non-phi use counts of all connected phis.
for (int i = 0; i < phi_count; ++i) {
HPhi* phi = phi_list->at(i);
for (BitVector::Iterator it(connected_phis[i]);
!it.Done();
it.Advance()) {
int index = it.Current();
HPhi* it_use = phi_list->at(index);
if (index != i) phi->AddNonPhiUsesFrom(it_use); // Don't count twice.
}
}
// Initialize work list
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
const ZoneList<HPhi*>* phis = block->phis();
for (int j = 0; j < phis->length(); ++j) {
AddToWorklist(phis->at(j));
}
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
AddToWorklist(current);
}
}
// Do a fixed point iteration, trying to improve representations
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
current->InferRepresentation(this);
in_worklist_.Remove(current->id());
}
// Lastly: any instruction that we don't have representation information
// for defaults to Tagged.
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
const ZoneList<HPhi*>* phis = block->phis();
for (int j = 0; j < phis->length(); ++j) {
HPhi* phi = phis->at(j);
if (phi->representation().IsNone()) {
phi->ChangeRepresentation(Representation::Tagged());
}
}
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->representation().IsNone() &&
current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
if (current->CheckFlag(HInstruction::kCannotBeTagged)) {
current->ChangeRepresentation(Representation::Double());
} else {
current->ChangeRepresentation(Representation::Tagged());
}
}
}
}
}
} // namespace internal
} // namespace v8


@ -1,35 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
#define V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HInferRepresentationPhase : public HPhase {
public:
explicit HInferRepresentationPhase(HGraph* graph)
: HPhase("H_Infer representations", graph),
worklist_(8, zone()),
in_worklist_(graph->GetMaximumValueID(), zone()) { }
void Run();
void AddToWorklist(HValue* current);
private:
ZoneList<HValue*> worklist_;
BitVector in_worklist_;
DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_


@ -1,56 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-infer-types.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
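// Update the inferred types of all phis and instructions in blocks
// [from_inclusive, to_inclusive]. Loop bodies are handled by a recursive call,
// after which the loop header phis are re-propagated to a fixed point.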
void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) {
for (int i = from_inclusive; i <= to_inclusive; ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
const ZoneList<HPhi*>* phis = block->phis();
for (int j = 0; j < phis->length(); j++) {
phis->at(j)->UpdateInferredType();
}
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
it.Current()->UpdateInferredType();
}
if (block->IsLoopHeader()) {
HBasicBlock* last_back_edge =
block->loop_information()->GetLastBackEdge();
InferTypes(i + 1, last_back_edge->block_id());
// Skip all blocks already processed by the recursive call.
i = last_back_edge->block_id();
// Update phis of the loop header now after the whole loop body is
// guaranteed to be processed.
for (int j = 0; j < block->phis()->length(); ++j) {
HPhi* phi = block->phis()->at(j);
worklist_.Add(phi, zone());
in_worklist_.Add(phi->id());
}
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
in_worklist_.Remove(current->id());
if (current->UpdateInferredType()) {
for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!in_worklist_.Contains(use->id())) {
in_worklist_.Add(use->id());
worklist_.Add(use, zone());
}
}
}
}
DCHECK(in_worklist_.IsEmpty());
}
}
}
} // namespace internal
} // namespace v8


@ -1,37 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
#define V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HInferTypesPhase : public HPhase {
public:
explicit HInferTypesPhase(HGraph* graph)
: HPhase("H_Inferring types", graph), worklist_(8, zone()),
in_worklist_(graph->GetMaximumValueID(), zone()) { }
void Run() {
InferTypes(0, graph()->blocks()->length() - 1);
}
private:
void InferTypes(int from_inclusive, int to_inclusive);
ZoneList<HValue*> worklist_;
BitVector in_worklist_;
DISALLOW_COPY_AND_ASSIGN(HInferTypesPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -1,512 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-load-elimination.h"
#include "src/crankshaft/hydrogen-alias-analysis.h"
#include "src/crankshaft/hydrogen-flow-engine.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
#define GLOBAL true
#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
static const int kMaxTrackedFields = 16;
static const int kMaxTrackedObjects = 5;
// An element in the field approximation list.
class HFieldApproximation : public ZoneObject {
public: // Just a data blob.
HValue* object_;
HValue* last_value_;
HFieldApproximation* next_;
// Recursively copy the entire linked list of field approximations.
HFieldApproximation* Copy(Zone* zone) {
HFieldApproximation* copy = new(zone) HFieldApproximation();
copy->object_ = this->object_;
copy->last_value_ = this->last_value_;
copy->next_ = this->next_ == NULL ? NULL : this->next_->Copy(zone);
return copy;
}
};
// The main data structure used during load/store elimination. Each in-object
// field is tracked separately. For each field, store a list of known field
// values for known objects.
class HLoadEliminationTable : public ZoneObject {
public:
HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
: zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
// The main processing of instructions.
HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
switch (instr->opcode()) {
case HValue::kLoadNamedField: {
HLoadNamedField* l = HLoadNamedField::cast(instr);
TRACE((" process L%d field %d (o%d)\n",
instr->id(),
FieldOf(l->access()),
l->object()->ActualValue()->id()));
HValue* result = load(l);
if (result != instr && l->CanBeReplacedWith(result)) {
// The load can be replaced with a previous load or a value.
TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
instr->DeleteAndReplaceWith(result);
}
break;
}
case HValue::kStoreNamedField: {
HStoreNamedField* s = HStoreNamedField::cast(instr);
TRACE((" process S%d field %d (o%d) = v%d\n",
instr->id(),
FieldOf(s->access()),
s->object()->ActualValue()->id(),
s->value()->id()));
HValue* result = store(s);
if (result == NULL) {
// The store is redundant. Remove it.
TRACE((" remove S%d\n", instr->id()));
instr->DeleteAndReplaceWith(NULL);
}
break;
}
case HValue::kTransitionElementsKind: {
HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
HValue* object = t->object()->ActualValue();
KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
break;
}
default: {
if (instr->CheckChangesFlag(kInobjectFields)) {
TRACE((" kill-all i%d\n", instr->id()));
Kill();
break;
}
if (instr->CheckChangesFlag(kMaps)) {
TRACE((" kill-maps i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
}
if (instr->CheckChangesFlag(kElementsKind)) {
TRACE((" kill-elements-kind i%d\n", instr->id()));
KillOffset(JSObject::kMapOffset);
KillOffset(JSObject::kElementsOffset);
}
if (instr->CheckChangesFlag(kElementsPointer)) {
TRACE((" kill-elements i%d\n", instr->id()));
KillOffset(JSObject::kElementsOffset);
}
if (instr->CheckChangesFlag(kOsrEntries)) {
TRACE((" kill-osr i%d\n", instr->id()));
Kill();
}
}
// Improvements possible:
// - learn from HCheckMaps for field 0
// - remove unobservable stores (write-after-write)
// - track cells
// - track globals
// - track roots
}
return this;
}
// Support for global analysis with HFlowEngine: Merge given state with
// the other incoming state.
static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
HBasicBlock* succ_block,
HLoadEliminationTable* pred_state,
HBasicBlock* pred_block,
Zone* zone) {
DCHECK(pred_state != NULL);
if (succ_state == NULL) {
return pred_state->Copy(succ_block, pred_block, zone);
} else {
return succ_state->Merge(succ_block, pred_state, pred_block, zone);
}
}
// Support for global analysis with HFlowEngine: Given state merged with all
// the other incoming states, prepare it for use.
static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
HBasicBlock* block,
Zone* zone) {
DCHECK(state != NULL);
return state;
}
private:
// Copy state to successor block.
HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
Zone* zone) {
HLoadEliminationTable* copy =
new(zone) HLoadEliminationTable(zone, aliasing_);
copy->EnsureFields(fields_.length());
for (int i = 0; i < fields_.length(); i++) {
copy->fields_[i] = fields_[i] == NULL ? NULL : fields_[i]->Copy(zone);
}
if (FLAG_trace_load_elimination) {
TRACE((" copy-to B%d\n", succ->block_id()));
copy->Print();
}
return copy;
}
// Merge this state with the other incoming state.
HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
HBasicBlock* that_block, Zone* zone) {
if (that->fields_.length() < fields_.length()) {
// Drop fields not in the other table.
fields_.Rewind(that->fields_.length());
}
for (int i = 0; i < fields_.length(); i++) {
// Merge the field approximations for like fields.
HFieldApproximation* approx = fields_[i];
HFieldApproximation* prev = NULL;
while (approx != NULL) {
// TODO(titzer): Merging is O(N * M); sort?
HFieldApproximation* other = that->Find(approx->object_, i);
if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
// Kill an entry that doesn't agree with the other value.
if (prev != NULL) {
prev->next_ = approx->next_;
} else {
fields_[i] = approx->next_;
}
approx = approx->next_;
continue;
}
prev = approx;
approx = approx->next_;
}
}
if (FLAG_trace_load_elimination) {
TRACE((" merge-to B%d\n", succ->block_id()));
Print();
}
return this;
}
friend class HLoadEliminationEffects; // Calls Kill() and others.
friend class HLoadEliminationPhase;
private:
// Process a load instruction, updating internal table state. If a previous
// load or store for this object and field exists, return the new value with
// which the load should be replaced. Otherwise, return {instr}.
HValue* load(HLoadNamedField* instr) {
    // There must be no loads from non-observable in-object properties.
DCHECK(!instr->access().IsInobject() ||
instr->access().existing_inobject_property());
int field = FieldOf(instr->access());
if (field < 0) return instr;
HValue* object = instr->object()->ActualValue();
HFieldApproximation* approx = FindOrCreate(object, field);
if (approx->last_value_ == NULL) {
// Load is not redundant. Fill out a new entry.
approx->last_value_ = instr;
return instr;
} else if (approx->last_value_->block()->EqualToOrDominates(
instr->block())) {
// Eliminate the load. Reuse previously stored value or load instruction.
return approx->last_value_;
} else {
return instr;
}
}
// Process a store instruction, updating internal table state. If a previous
// store to the same object and field makes this store redundant (e.g. because
// the stored values are the same), return NULL indicating that this store
// instruction is redundant. Otherwise, return {instr}.
HValue* store(HStoreNamedField* instr) {
if (instr->access().IsInobject() &&
!instr->access().existing_inobject_property()) {
TRACE((" skipping non existing property initialization store\n"));
return instr;
}
int field = FieldOf(instr->access());
if (field < 0) return KillIfMisaligned(instr);
HValue* object = instr->object()->ActualValue();
HValue* value = instr->value();
if (instr->has_transition()) {
// A transition introduces a new field and alters the map of the object.
// Since the field in the object is new, it cannot alias existing entries.
KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
} else {
// Kill non-equivalent may-alias entries.
KillFieldInternal(object, field, value);
}
HFieldApproximation* approx = FindOrCreate(object, field);
if (Equal(approx->last_value_, value)) {
// The store is redundant because the field already has this value.
return NULL;
} else {
// The store is not redundant. Update the entry.
approx->last_value_ = value;
return instr;
}
}
// Kill everything in this table.
void Kill() {
fields_.Rewind(0);
}
// Kill all entries matching the given offset.
void KillOffset(int offset) {
int field = FieldOf(offset);
if (field >= 0 && field < fields_.length()) {
fields_[field] = NULL;
}
}
// Kill all entries aliasing the given store.
void KillStore(HStoreNamedField* s) {
int field = FieldOf(s->access());
if (field >= 0) {
KillFieldInternal(s->object()->ActualValue(), field, s->value());
} else {
KillIfMisaligned(s);
}
}
// Kill multiple entries in the case of a misaligned store.
HValue* KillIfMisaligned(HStoreNamedField* instr) {
HObjectAccess access = instr->access();
if (access.IsInobject()) {
int offset = access.offset();
if ((offset % kPointerSize) != 0) {
// Kill the field containing the first word of the access.
HValue* object = instr->object()->ActualValue();
int field = offset / kPointerSize;
KillFieldInternal(object, field, NULL);
// Kill the next field in case of overlap.
int size = access.representation().size();
int next_field = (offset + size - 1) / kPointerSize;
if (next_field != field) KillFieldInternal(object, next_field, NULL);
}
}
return instr;
}
// Find an entry for the given object and field pair.
HFieldApproximation* Find(HValue* object, int field) {
// Search for a field approximation for this object.
HFieldApproximation* approx = fields_[field];
while (approx != NULL) {
if (aliasing_->MustAlias(object, approx->object_)) return approx;
approx = approx->next_;
}
return NULL;
}
// Find or create an entry for the given object and field pair.
HFieldApproximation* FindOrCreate(HValue* object, int field) {
EnsureFields(field + 1);
// Search for a field approximation for this object.
HFieldApproximation* approx = fields_[field];
int count = 0;
while (approx != NULL) {
if (aliasing_->MustAlias(object, approx->object_)) return approx;
count++;
approx = approx->next_;
}
if (count >= kMaxTrackedObjects) {
// Pull the last entry off the end and repurpose it for this object.
approx = ReuseLastApproximation(field);
} else {
// Allocate a new entry.
approx = new(zone_) HFieldApproximation();
}
// Insert the entry at the head of the list.
approx->object_ = object;
approx->last_value_ = NULL;
approx->next_ = fields_[field];
fields_[field] = approx;
return approx;
}
// Kill all entries for a given field that _may_ alias the given object
// and do _not_ have the given value.
void KillFieldInternal(HValue* object, int field, HValue* value) {
if (field >= fields_.length()) return; // Nothing to do.
HFieldApproximation* approx = fields_[field];
HFieldApproximation* prev = NULL;
while (approx != NULL) {
if (aliasing_->MayAlias(object, approx->object_)) {
if (!Equal(approx->last_value_, value)) {
// Kill an aliasing entry that doesn't agree on the value.
if (prev != NULL) {
prev->next_ = approx->next_;
} else {
fields_[field] = approx->next_;
}
approx = approx->next_;
continue;
}
}
prev = approx;
approx = approx->next_;
}
}
bool Equal(HValue* a, HValue* b) {
if (a == b) return true;
if (a != NULL && b != NULL && a->CheckFlag(HValue::kUseGVN)) {
return a->Equals(b);
}
return false;
}
// Remove the last approximation for a field so that it can be reused.
// We reuse the last entry because it was the first inserted and is thus
// farthest away from the current instruction.
HFieldApproximation* ReuseLastApproximation(int field) {
HFieldApproximation* approx = fields_[field];
DCHECK(approx != NULL);
HFieldApproximation* prev = NULL;
while (approx->next_ != NULL) {
prev = approx;
approx = approx->next_;
}
if (prev != NULL) prev->next_ = NULL;
return approx;
}
// Compute the field index for the given object access; -1 if not tracked.
int FieldOf(HObjectAccess access) {
return access.IsInobject() ? FieldOf(access.offset()) : -1;
}
// Compute the field index for the given in-object offset; -1 if not tracked.
int FieldOf(int offset) {
if (offset >= kMaxTrackedFields * kPointerSize) return -1;
if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses.
return offset / kPointerSize;
}
// Ensure internal storage for the given number of fields.
void EnsureFields(int num_fields) {
if (fields_.length() < num_fields) {
fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
}
}
// Print this table to stdout.
void Print() {
for (int i = 0; i < fields_.length(); i++) {
PrintF(" field %d: ", i);
for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
PrintF("[o%d =", a->object_->id());
if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
PrintF("] ");
}
PrintF("\n");
}
}
Zone* zone_;
ZoneList<HFieldApproximation*> fields_;
HAliasAnalyzer* aliasing_;
};
// Support for HFlowEngine: collect store effects within loops.
class HLoadEliminationEffects : public ZoneObject {
public:
explicit HLoadEliminationEffects(Zone* zone)
: zone_(zone), stores_(5, zone) { }
inline bool Disabled() {
return false; // Effects are _not_ disabled.
}
// Process a possibly side-effecting instruction.
void Process(HInstruction* instr, Zone* zone) {
if (instr->IsStoreNamedField()) {
stores_.Add(HStoreNamedField::cast(instr), zone_);
} else {
flags_.Add(instr->ChangesFlags());
}
}
// Apply these effects to the given load elimination table.
void Apply(HLoadEliminationTable* table) {
    // Loads must not be hoisted past the OSR entry; therefore we kill
// everything if we see an OSR entry.
if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
table->Kill();
return;
}
if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
table->KillOffset(JSObject::kMapOffset);
}
if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
table->KillOffset(JSObject::kElementsOffset);
}
// Kill non-agreeing fields for each store contained in these effects.
for (int i = 0; i < stores_.length(); i++) {
table->KillStore(stores_[i]);
}
}
// Union these effects with the other effects.
void Union(HLoadEliminationEffects* that, Zone* zone) {
flags_.Add(that->flags_);
for (int i = 0; i < that->stores_.length(); i++) {
stores_.Add(that->stores_[i], zone);
}
}
private:
Zone* zone_;
GVNFlagSet flags_;
ZoneList<HStoreNamedField*> stores_;
};
// The main routine of the analysis phase. Use the HFlowEngine for either a
// local or a global analysis.
void HLoadEliminationPhase::Run() {
HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
engine(graph(), zone());
HAliasAnalyzer aliasing;
HLoadEliminationTable* table =
new(zone()) HLoadEliminationTable(zone(), &aliasing);
if (GLOBAL) {
// Perform a global analysis.
engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
} else {
// Perform only local analysis.
for (int i = 0; i < graph()->blocks()->length(); i++) {
table->Kill();
engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
}
}
}
} // namespace internal
} // namespace v8


@ -1,28 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
#define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HLoadEliminationPhase : public HPhase {
public:
explicit HLoadEliminationPhase(HGraph* graph)
: HPhase("H_Load elimination", graph) { }
void Run();
private:
void EliminateLoads(HBasicBlock* block);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_


@ -1,56 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-mark-unreachable.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() {
// If there is unreachable code in the graph, propagate the unreachable marks
// using a fixed-point iteration.
bool changed = true;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
while (changed) {
changed = false;
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* block = blocks->at(i);
if (!block->IsReachable()) continue;
bool is_reachable = blocks->at(0) == block;
for (HPredecessorIterator it(block); !it.Done(); it.Advance()) {
HBasicBlock* predecessor = it.Current();
// A block is reachable if one of its predecessors is reachable,
// doesn't deoptimize and either is known to transfer control to the
// block or has a control flow instruction for which the next block
// cannot be determined.
if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) {
HBasicBlock* pred_succ;
bool known_pred_succ =
predecessor->end()->KnownSuccessorBlock(&pred_succ);
if (!known_pred_succ || pred_succ == block) {
is_reachable = true;
break;
}
}
if (block->is_osr_entry()) {
is_reachable = true;
}
}
if (!is_reachable) {
block->MarkUnreachable();
changed = true;
}
}
}
}
void HMarkUnreachableBlocksPhase::Run() {
MarkUnreachableBlocks();
}
} // namespace internal
} // namespace v8


@ -1,31 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
#define V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HMarkUnreachableBlocksPhase : public HPhase {
public:
explicit HMarkUnreachableBlocksPhase(HGraph* graph)
: HPhase("H_Mark unreachable blocks", graph) { }
void Run();
private:
void MarkUnreachableBlocks();
DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_


@ -1,286 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-range-analysis.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
class Pending {
public:
Pending(HBasicBlock* block, int last_changed_range)
: block_(block), last_changed_range_(last_changed_range) {}
HBasicBlock* block() const { return block_; }
int last_changed_range() const { return last_changed_range_; }
private:
HBasicBlock* block_;
int last_changed_range_;
};
void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
if (FLAG_trace_range) {
va_list arguments;
va_start(arguments, msg);
base::OS::VPrint(msg, arguments);
va_end(arguments);
}
}
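// Walk the dominator tree depth-first, inferring ranges from compare-based
// control flow and from each instruction, and roll the recorded range updates
// back when leaving a dominator subtree.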
void HRangeAnalysisPhase::Run() {
HBasicBlock* block(graph()->entry_block());
ZoneList<Pending> stack(graph()->blocks()->length(), zone());
while (block != NULL) {
TraceRange("Analyzing block B%d\n", block->block_id());
// Infer range based on control flow.
if (block->predecessors()->length() == 1) {
HBasicBlock* pred = block->predecessors()->first();
if (pred->end()->IsCompareNumericAndBranch()) {
InferControlFlowRange(HCompareNumericAndBranch::cast(pred->end()),
block);
}
}
// Process phi instructions.
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
InferRange(phi);
}
// Go through all instructions of the current block.
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HValue* value = it.Current();
InferRange(value);
// Compute the bailout-on-minus-zero flag.
if (value->IsChange()) {
HChange* instr = HChange::cast(value);
// Propagate flags for negative zero checks upwards from conversions
// int32-to-tagged and int32-to-double.
Representation from = instr->value()->representation();
DCHECK(from.Equals(instr->from()));
if (from.IsSmiOrInteger32()) {
DCHECK(instr->to().IsTagged() ||
instr->to().IsDouble() ||
instr->to().IsSmiOrInteger32());
PropagateMinusZeroChecks(instr->value());
}
}
}
// Continue analysis in all dominated blocks.
const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
if (!dominated_blocks->is_empty()) {
// Continue with first dominated block, and push the
// remaining blocks on the stack (in reverse order).
int last_changed_range = changed_ranges_.length();
for (int i = dominated_blocks->length() - 1; i > 0; --i) {
stack.Add(Pending(dominated_blocks->at(i), last_changed_range), zone());
}
block = dominated_blocks->at(0);
} else if (!stack.is_empty()) {
// Pop next pending block from stack.
Pending pending = stack.RemoveLast();
RollBackTo(pending.last_changed_range());
block = pending.block();
} else {
// All blocks done.
block = NULL;
}
}
// The ranges are not valid anymore due to SSI vs. SSA!
PoisonRanges();
}
void HRangeAnalysisPhase::PoisonRanges() {
#ifdef DEBUG
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (instr->HasRange()) instr->PoisonRange();
}
}
#endif
}
void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test,
HBasicBlock* dest) {
DCHECK((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
if (test->representation().IsSmiOrInteger32()) {
Token::Value op = test->token();
if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
}
Token::Value inverted_op = Token::ReverseCompareOp(op);
UpdateControlFlowRange(op, test->left(), test->right());
UpdateControlFlowRange(inverted_op, test->right(), test->left());
}
}
// We know that value [op] other. Use this information to update the range on
// value.
void HRangeAnalysisPhase::UpdateControlFlowRange(Token::Value op,
HValue* value,
HValue* other) {
Range temp_range;
Range* range = other->range() != NULL ? other->range() : &temp_range;
Range* new_range = NULL;
TraceRange("Control flow range infer %d %s %d\n",
value->id(),
Token::Name(op),
other->id());
if (op == Token::EQ || op == Token::EQ_STRICT) {
// The same range has to apply for value.
new_range = range->Copy(graph()->zone());
} else if (op == Token::LT || op == Token::LTE) {
new_range = range->CopyClearLower(graph()->zone());
if (op == Token::LT) {
new_range->AddConstant(-1);
}
} else if (op == Token::GT || op == Token::GTE) {
new_range = range->CopyClearUpper(graph()->zone());
if (op == Token::GT) {
new_range->AddConstant(1);
}
}
if (new_range != NULL && !new_range->IsMostGeneric()) {
AddRange(value, new_range);
}
}
void HRangeAnalysisPhase::InferRange(HValue* value) {
DCHECK(!value->HasRange());
if (!value->representation().IsNone()) {
value->ComputeInitialRange(graph()->zone());
Range* range = value->range();
TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
value->id(),
value->Mnemonic(),
range->lower(),
range->upper());
}
}
void HRangeAnalysisPhase::RollBackTo(int index) {
DCHECK(index <= changed_ranges_.length());
for (int i = index; i < changed_ranges_.length(); ++i) {
changed_ranges_[i]->RemoveLastAddedRange();
}
changed_ranges_.Rewind(index);
}
void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
Range* original_range = value->range();
value->AddNewRange(range, graph()->zone());
changed_ranges_.Add(value, zone());
Range* new_range = value->range();
TraceRange("Updated range of %d set to [%d,%d]\n",
value->id(),
new_range->lower(),
new_range->upper());
if (original_range != NULL) {
TraceRange("Original range was [%d,%d]\n",
original_range->lower(),
original_range->upper());
}
TraceRange("New information was [%d,%d]\n",
range->lower(),
range->upper());
}
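// Starting from {value}, walk through phis, conversions and arithmetic
// operations, setting HValue::kBailoutOnMinusZero wherever a minus-zero
// result must be caught at runtime.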
void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
DCHECK(worklist_.is_empty());
DCHECK(in_worklist_.IsEmpty());
AddToWorklist(value);
while (!worklist_.is_empty()) {
value = worklist_.RemoveLast();
if (value->IsPhi()) {
// For phis, we must propagate the check to all of its inputs.
HPhi* phi = HPhi::cast(value);
for (int i = 0; i < phi->OperandCount(); ++i) {
AddToWorklist(phi->OperandAt(i));
}
} else if (value->IsUnaryMathOperation()) {
HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
if (instr->representation().IsSmiOrInteger32() &&
!instr->value()->representation().Equals(instr->representation())) {
if (instr->value()->range() == NULL ||
instr->value()->range()->CanBeMinusZero()) {
instr->SetFlag(HValue::kBailoutOnMinusZero);
}
}
if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
instr->representation().Equals(
instr->RequiredInputRepresentation(0))) {
AddToWorklist(instr->value());
}
} else if (value->IsChange()) {
HChange* instr = HChange::cast(value);
if (!instr->from().IsSmiOrInteger32() &&
!instr->CanTruncateToInt32() &&
(instr->value()->range() == NULL ||
instr->value()->range()->CanBeMinusZero())) {
instr->SetFlag(HValue::kBailoutOnMinusZero);
}
} else if (value->IsForceRepresentation()) {
HForceRepresentation* instr = HForceRepresentation::cast(value);
AddToWorklist(instr->value());
} else if (value->IsMod()) {
HMod* instr = HMod::cast(value);
if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
instr->SetFlag(HValue::kBailoutOnMinusZero);
AddToWorklist(instr->left());
}
} else if (value->IsDiv() || value->IsMul()) {
HBinaryOperation* instr = HBinaryOperation::cast(value);
if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
instr->SetFlag(HValue::kBailoutOnMinusZero);
}
AddToWorklist(instr->right());
AddToWorklist(instr->left());
} else if (value->IsMathFloorOfDiv()) {
HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
instr->SetFlag(HValue::kBailoutOnMinusZero);
} else if (value->IsAdd() || value->IsSub()) {
HBinaryOperation* instr = HBinaryOperation::cast(value);
if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
// Propagate to the left argument. If the left argument cannot be -0,
// then the result of the add/sub operation cannot be either.
AddToWorklist(instr->left());
}
} else if (value->IsMathMinMax()) {
HMathMinMax* instr = HMathMinMax::cast(value);
AddToWorklist(instr->right());
AddToWorklist(instr->left());
}
}
in_worklist_.Clear();
DCHECK(in_worklist_.IsEmpty());
DCHECK(worklist_.is_empty());
}
} // namespace internal
} // namespace v8


@ -1,52 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
#include "src/base/compiler-specific.h"
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HRangeAnalysisPhase : public HPhase {
public:
explicit HRangeAnalysisPhase(HGraph* graph)
: HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
in_worklist_(graph->GetMaximumValueID(), zone()),
worklist_(32, zone()) {}
void Run();
private:
PRINTF_FORMAT(2, 3) void TraceRange(const char* msg, ...);
void InferControlFlowRange(HCompareNumericAndBranch* test,
HBasicBlock* dest);
void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
void InferRange(HValue* value);
void RollBackTo(int index);
void AddRange(HValue* value, Range* range);
void AddToWorklist(HValue* value) {
if (in_worklist_.Contains(value->id())) return;
in_worklist_.Add(value->id());
worklist_.Add(value, zone());
}
void PropagateMinusZeroChecks(HValue* value);
void PoisonRanges();
ZoneList<HValue*> changed_ranges_;
BitVector in_worklist_;
ZoneList<HValue*> worklist_;
DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_


@ -1,67 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-redundant-phi.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HRedundantPhiEliminationPhase::Run() {
// Gather all phis from all blocks first.
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
ZoneList<HPhi*> all_phis(blocks->length(), zone());
for (int i = 0; i < blocks->length(); ++i) {
HBasicBlock* block = blocks->at(i);
for (int j = 0; j < block->phis()->length(); j++) {
all_phis.Add(block->phis()->at(j), zone());
}
}
// Iteratively reduce all phis in the list.
ProcessPhis(&all_phis);
#if DEBUG
// Make sure that we *really* removed all redundant phis.
for (int i = 0; i < blocks->length(); ++i) {
for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
DCHECK(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
}
}
#endif
}
void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) {
ProcessPhis(block->phis());
}
void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
bool updated;
do {
// Iteratively replace all redundant phis in the given list.
updated = false;
for (int i = 0; i < phis->length(); i++) {
HPhi* phi = phis->at(i);
if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced.
HValue* replacement = phi->GetRedundantReplacement();
if (replacement != NULL) {
phi->SetFlag(HValue::kIsDead);
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
value->SetOperandAt(it.index(), replacement);
// Iterate again if used in another non-dead phi.
updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead);
}
phi->block()->RemovePhi(phi);
}
}
} while (updated);
}
} // namespace internal
} // namespace v8
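The fixed point above is easiest to see on a tiny example. Below is a hedged, standalone sketch: RedundantReplacement stands in for HPhi::GetRedundantReplacement and treats a phi as redundant when, ignoring references to itself, it has a single distinct operand (a simplification of the loop-operand rule stated in hydrogen-redundant-phi.h).

#include <cstdio>
#include <vector>

struct Value {
  bool is_phi = false;
  bool dead = false;
  std::vector<Value*> operands;  // only meaningful for phis
};

// The unique operand a phi would collapse to, or nullptr if it has two or
// more distinct inputs (ignoring self-references through loop back edges).
Value* RedundantReplacement(Value* phi) {
  Value* candidate = nullptr;
  for (Value* op : phi->operands) {
    if (op == phi) continue;
    if (candidate == nullptr) candidate = op;
    else if (candidate != op) return nullptr;
  }
  return candidate;
}

// Iteratively replace redundant phis; replacing one phi can make another one
// redundant, hence the do/while, just like ProcessPhis above.
void EliminateRedundantPhis(std::vector<Value*>* phis) {
  bool updated;
  do {
    updated = false;
    for (Value* phi : *phis) {
      if (phi->dead) continue;
      Value* replacement = RedundantReplacement(phi);
      if (replacement == nullptr) continue;
      phi->dead = true;
      updated = true;
      // Rewrite uses; in this sketch the only users are the other phis.
      for (Value* other : *phis) {
        if (other->dead) continue;
        for (Value*& op : other->operands) {
          if (op == phi) op = replacement;
        }
      }
    }
  } while (updated);
}

int main() {
  Value x;                      // some non-phi definition
  Value p1, p2;
  p1.is_phi = p2.is_phi = true;
  p1.operands = {&x, &p1};      // phi(x, p1): redundant, collapses to x
  p2.operands = {&p1, &x};      // becomes phi(x, x) after the rewrite
  std::vector<Value*> phis = {&p1, &p2};
  EliminateRedundantPhis(&phis);
  std::printf("p1 dead: %d, p2 dead: %d\n", p1.dead, p2.dead);
  return 0;
}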


@ -1,34 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
#define V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
// Replace all phis consisting of a single non-loop operand plus any number of
// loop operands by that single non-loop operand.
class HRedundantPhiEliminationPhase : public HPhase {
public:
explicit HRedundantPhiEliminationPhase(HGraph* graph)
: HPhase("H_Redundant phi elimination", graph) { }
void Run();
void ProcessBlock(HBasicBlock* block);
private:
void ProcessPhis(const ZoneList<HPhi*>* phis);
DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_


@ -1,190 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-removable-simulates.h"
#include "src/crankshaft/hydrogen-flow-engine.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
class State : public ZoneObject {
public:
explicit State(Zone* zone)
: zone_(zone), mergelist_(2, zone), first_(true), mode_(NORMAL) { }
State* Process(HInstruction* instr, Zone* zone) {
if (FLAG_trace_removable_simulates) {
PrintF("[%s with state %p in B%d: #%d %s]\n",
mode_ == NORMAL ? "processing" : "collecting",
reinterpret_cast<void*>(this), instr->block()->block_id(),
instr->id(), instr->Mnemonic());
}
// Forward-merge "trains" of simulates after an instruction with observable
// side effects to keep live ranges short.
if (mode_ == COLLECT_CONSECUTIVE_SIMULATES) {
if (instr->IsSimulate()) {
HSimulate* current_simulate = HSimulate::cast(instr);
if (current_simulate->is_candidate_for_removal() &&
!current_simulate->ast_id().IsNone()) {
Remember(current_simulate);
return this;
}
}
FlushSimulates();
mode_ = NORMAL;
}
// Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid
// folding across HEnterInlined.
DCHECK(!(instr->IsEnterInlined() &&
HSimulate::cast(instr->previous())->is_candidate_for_removal()));
if (instr->IsLeaveInlined() || instr->IsReturn()) {
// Never fold simulates from inlined environments into simulates in the
// outer environment. Simply remove all accumulated simulates without
// merging. This is safe because simulates after instructions with side
// effects are never added to the merge list. The same reasoning holds for
// return instructions.
RemoveSimulates();
return this;
}
if (instr->IsControlInstruction()) {
// Merge the accumulated simulates at the end of the block.
FlushSimulates();
return this;
}
if (instr->IsCapturedObject()) {
// Do not merge simulates across captured objects - captured objects
// change environments during environment replay, and such changes
// would not be reflected in the simulate.
FlushSimulates();
return this;
}
// Skip the non-simulates and the first simulate.
if (!instr->IsSimulate()) return this;
if (first_) {
first_ = false;
return this;
}
HSimulate* current_simulate = HSimulate::cast(instr);
if (!current_simulate->is_candidate_for_removal()) {
Remember(current_simulate);
FlushSimulates();
} else if (current_simulate->ast_id().IsNone()) {
DCHECK(current_simulate->next()->IsEnterInlined());
FlushSimulates();
} else if (current_simulate->previous()->HasObservableSideEffects()) {
Remember(current_simulate);
mode_ = COLLECT_CONSECUTIVE_SIMULATES;
} else {
Remember(current_simulate);
}
return this;
}
static State* Merge(State* succ_state,
HBasicBlock* succ_block,
State* pred_state,
HBasicBlock* pred_block,
Zone* zone) {
return (succ_state == NULL)
? pred_state->Copy(succ_block, pred_block, zone)
: succ_state->Merge(succ_block, pred_state, pred_block, zone);
}
static State* Finish(State* state, HBasicBlock* block, Zone* zone) {
if (FLAG_trace_removable_simulates) {
PrintF("[preparing state %p for B%d]\n", reinterpret_cast<void*>(state),
block->block_id());
}
// For our current local analysis, we should not remember simulates across
// block boundaries.
DCHECK(!state->HasRememberedSimulates());
// Nasty heuristic: Never remove the first simulate in a block. This
// just so happens to have a beneficial effect on register allocation.
state->first_ = true;
return state;
}
private:
explicit State(const State& other)
: zone_(other.zone_),
mergelist_(other.mergelist_, other.zone_),
first_(other.first_),
mode_(other.mode_) { }
enum Mode { NORMAL, COLLECT_CONSECUTIVE_SIMULATES };
bool HasRememberedSimulates() const { return !mergelist_.is_empty(); }
void Remember(HSimulate* sim) {
mergelist_.Add(sim, zone_);
}
void FlushSimulates() {
if (HasRememberedSimulates()) {
mergelist_.RemoveLast()->MergeWith(&mergelist_);
}
}
void RemoveSimulates() {
while (HasRememberedSimulates()) {
mergelist_.RemoveLast()->DeleteAndReplaceWith(NULL);
}
}
State* Copy(HBasicBlock* succ_block, HBasicBlock* pred_block, Zone* zone) {
State* copy = new(zone) State(*this);
if (FLAG_trace_removable_simulates) {
PrintF("[copy state %p from B%d to new state %p for B%d]\n",
reinterpret_cast<void*>(this), pred_block->block_id(),
reinterpret_cast<void*>(copy), succ_block->block_id());
}
return copy;
}
State* Merge(HBasicBlock* succ_block,
State* pred_state,
HBasicBlock* pred_block,
Zone* zone) {
// For our current local analysis, we should not remember simulates across
// block boundaries.
DCHECK(!pred_state->HasRememberedSimulates());
DCHECK(!HasRememberedSimulates());
if (FLAG_trace_removable_simulates) {
PrintF("[merge state %p from B%d into %p for B%d]\n",
reinterpret_cast<void*>(pred_state), pred_block->block_id(),
reinterpret_cast<void*>(this), succ_block->block_id());
}
return this;
}
Zone* zone_;
ZoneList<HSimulate*> mergelist_;
bool first_;
Mode mode_;
};
// We don't use effects here.
class Effects : public ZoneObject {
public:
explicit Effects(Zone* zone) { }
bool Disabled() { return true; }
void Process(HInstruction* instr, Zone* zone) { }
void Apply(State* state) { }
void Union(Effects* that, Zone* zone) { }
};
void HMergeRemovableSimulatesPhase::Run() {
HFlowEngine<State, Effects> engine(graph(), zone());
State* state = new(zone()) State(zone());
engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state);
}
} // namespace internal
} // namespace v8
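Stripped of the flow-engine plumbing, inlining boundaries and the first-simulate heuristic, the effect within one block is roughly "a run of removable simulates collapses into its last element". A rough, self-contained illustration of that idea (Instr and MergeRemovableSimulates are invented names, not V8 API):

#include <cstdio>
#include <string>
#include <vector>

struct Instr {
  bool is_simulate;
  bool removable;   // stands in for HSimulate::is_candidate_for_removal()
  std::string name;
};

// Collapse runs of removable simulates so that only the last one of each run
// survives; anything that is not a removable simulate breaks the run.
std::vector<Instr> MergeRemovableSimulates(const std::vector<Instr>& in) {
  std::vector<Instr> out;
  for (const Instr& instr : in) {
    if (instr.is_simulate && instr.removable && !out.empty() &&
        out.back().is_simulate && out.back().removable) {
      out.back() = instr;  // the later simulate subsumes the earlier one
      continue;
    }
    out.push_back(instr);
  }
  return out;
}

int main() {
  std::vector<Instr> block = {
      {false, false, "add"},
      {true, true, "simulate#1"},
      {true, true, "simulate#2"},   // simulate#1 is folded into this one
      {false, false, "call"},
      {true, false, "simulate#3"},  // not removable, kept as is
  };
  for (const Instr& i : MergeRemovableSimulates(block)) {
    std::printf("%s\n", i.name.c_str());
  }
  return 0;
}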


@ -1,29 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
#define V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HMergeRemovableSimulatesPhase : public HPhase {
public:
explicit HMergeRemovableSimulatesPhase(HGraph* graph)
: HPhase("H_Merge removable simulates", graph) { }
void Run();
private:
DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_


@ -1,245 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-representation-changes.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
HValue* value, HValue* use_value, int use_index, Representation to) {
// Insert the representation change right before its use. For phi-uses we
// insert at the end of the corresponding predecessor.
HInstruction* next = NULL;
if (use_value->IsPhi()) {
next = use_value->block()->predecessors()->at(use_index)->end();
} else {
next = HInstruction::cast(use_value);
}
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
// change instructions for them.
HInstruction* new_value = NULL;
bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
bool is_truncating_to_number =
use_value->CheckFlag(HValue::kTruncatingToNumber);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
if (is_truncating_to_int && to.IsInteger32()) {
Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
if (res.IsJust()) new_value = res.FromJust();
} else {
new_value = constant->CopyToRepresentation(to, graph()->zone());
}
}
if (new_value == NULL) {
new_value = new (graph()->zone())
HChange(value, to, is_truncating_to_smi, is_truncating_to_int,
is_truncating_to_number);
}
new_value->InsertBefore(next);
use_value->SetOperandAt(use_index, new_value);
}
static bool IsNonDeoptingIntToSmiChange(HChange* change) {
Representation from_rep = change->from();
Representation to_rep = change->to();
// Flags indicating Uint32 operations are set in a later Hydrogen phase.
DCHECK(!change->CheckFlag(HValue::kUint32));
return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits();
}
void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
HValue* value) {
Representation r = value->representation();
if (r.IsNone()) {
#ifdef DEBUG
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use_value = it.value();
int use_index = it.index();
Representation req = use_value->RequiredInputRepresentation(use_index);
DCHECK(req.IsNone());
}
#endif
return;
}
if (value->HasNoUses()) {
if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL);
return;
}
for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use_value = it.value();
int use_index = it.index();
Representation req = use_value->RequiredInputRepresentation(use_index);
if (req.IsNone() || req.Equals(r)) continue;
// If this is an HForceRepresentation instruction, and an HChange has been
// inserted above it, examine the input representation of the HChange. If
// that's int32, and this HForceRepresentation use is int32, and int32 to
// smi changes can't cause deoptimisation, set the input of the use to the
// input of the HChange.
if (value->IsForceRepresentation()) {
HValue* input = HForceRepresentation::cast(value)->value();
if (input->IsChange()) {
HChange* change = HChange::cast(input);
if (change->from().Equals(req) && IsNonDeoptingIntToSmiChange(change)) {
use_value->SetOperandAt(use_index, change->value());
continue;
}
}
}
InsertRepresentationChangeForUse(value, use_value, use_index, req);
}
if (value->HasNoUses()) {
DCHECK(value->IsConstant() || value->IsForceRepresentation());
value->DeleteAndReplaceWith(NULL);
} else {
// The only purpose of a HForceRepresentation is to represent the value
// after the (possible) HChange instruction. We make it disappear.
if (value->IsForceRepresentation()) {
value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
}
}
}
void HRepresentationChangesPhase::Run() {
// Compute truncation flag for phis:
//
// - Initially assume that all phis allow truncation to number and iteratively
// remove the ones that are used in an operation that does not do an implicit
// ToNumber conversion.
// - Also assume that all Integer32 phis allow ToInt32 truncation and all
// Smi phis allow truncation to Smi.
//
ZoneList<HPhi*> number_worklist(8, zone());
ZoneList<HPhi*> int_worklist(8, zone());
ZoneList<HPhi*> smi_worklist(8, zone());
const ZoneList<HPhi*>* phi_list(graph()->phi_list());
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
if (phi->representation().IsInteger32()) {
phi->SetFlag(HValue::kTruncatingToInt32);
} else if (phi->representation().IsSmi()) {
phi->SetFlag(HValue::kTruncatingToSmi);
phi->SetFlag(HValue::kTruncatingToInt32);
}
phi->SetFlag(HValue::kTruncatingToNumber);
}
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
HValue* value = NULL;
if (phi->CheckFlag(HValue::kTruncatingToNumber) &&
!phi->CheckUsesForFlag(HValue::kTruncatingToNumber, &value)) {
number_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToNumber);
phi->ClearFlag(HValue::kTruncatingToInt32);
phi->ClearFlag(HValue::kTruncatingToSmi);
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Number because of #%d %s\n",
phi->id(), value->id(), value->Mnemonic());
}
} else if (phi->representation().IsSmiOrInteger32() &&
!phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
int_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToInt32);
phi->ClearFlag(HValue::kTruncatingToSmi);
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
phi->id(), value->id(), value->Mnemonic());
}
} else if (phi->representation().IsSmi() &&
!phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
smi_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToSmi);
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
phi->id(), value->id(), value->Mnemonic());
}
}
}
while (!number_worklist.is_empty()) {
HPhi* current = number_worklist.RemoveLast();
for (int i = current->OperandCount() - 1; i >= 0; --i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() && input->CheckFlag(HValue::kTruncatingToNumber)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Number because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
input->ClearFlag(HValue::kTruncatingToNumber);
input->ClearFlag(HValue::kTruncatingToInt32);
input->ClearFlag(HValue::kTruncatingToSmi);
number_worklist.Add(HPhi::cast(input), zone());
}
}
}
while (!int_worklist.is_empty()) {
HPhi* current = int_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
input->representation().IsSmiOrInteger32() &&
input->CheckFlag(HValue::kTruncatingToInt32)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
input->ClearFlag(HValue::kTruncatingToInt32);
int_worklist.Add(HPhi::cast(input), zone());
}
}
}
while (!smi_worklist.is_empty()) {
HPhi* current = smi_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
HValue* input = current->OperandAt(i);
if (input->IsPhi() &&
input->representation().IsSmi() &&
input->CheckFlag(HValue::kTruncatingToSmi)) {
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
input->id(), current->id(), current->Mnemonic());
}
input->ClearFlag(HValue::kTruncatingToSmi);
smi_worklist.Add(HPhi::cast(input), zone());
}
}
}
const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
for (int i = 0; i < blocks->length(); ++i) {
// Process phi instructions first.
const HBasicBlock* block(blocks->at(i));
const ZoneList<HPhi*>* phis = block->phis();
for (int j = 0; j < phis->length(); j++) {
InsertRepresentationChangesForValue(phis->at(j));
}
// Process normal instructions.
for (HInstruction* current = block->first(); current != NULL; ) {
HInstruction* next = current->next();
InsertRepresentationChangesForValue(current);
current = next;
}
}
}
} // namespace internal
} // namespace v8
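The three worklists above all run the same backwards fix point: clear a truncation flag on a phi whose uses demand the exact value, then keep clearing it on the phis that flow into it. A compact sketch of just that propagation (Phi, ClearTruncationFlags and has_non_truncating_use are illustrative names, not V8 API):

#include <cstdio>
#include <vector>

struct Phi {
  bool truncating = true;              // optimistic initial assumption
  bool has_non_truncating_use = false; // some use needs the exact value
  std::vector<Phi*> operands;          // only phi operands matter here
};

void ClearTruncationFlags(std::vector<Phi*>* phis) {
  std::vector<Phi*> worklist;
  // Seed: phis with a use that does not perform the implicit conversion.
  for (Phi* phi : *phis) {
    if (phi->has_non_truncating_use && phi->truncating) {
      phi->truncating = false;
      worklist.push_back(phi);
    }
  }
  // Propagate: an operand feeding a non-truncating phi cannot truncate
  // either, which is what the number/int32/smi worklists above iterate on.
  while (!worklist.empty()) {
    Phi* current = worklist.back();
    worklist.pop_back();
    for (Phi* input : current->operands) {
      if (input->truncating) {
        input->truncating = false;
        worklist.push_back(input);
      }
    }
  }
}

int main() {
  Phi a, b, c;
  b.operands = {&a};
  c.operands = {&b};
  c.has_non_truncating_use = true;   // e.g. used where the exact value is observable
  std::vector<Phi*> phis = {&a, &b, &c};
  ClearTruncationFlags(&phis);
  std::printf("truncating: a=%d b=%d c=%d\n", a.truncating, b.truncating,
              c.truncating);
  return 0;
}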


@ -1,33 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
#define V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HRepresentationChangesPhase : public HPhase {
public:
explicit HRepresentationChangesPhase(HGraph* graph)
: HPhase("H_Representation changes", graph) { }
void Run();
private:
void InsertRepresentationChangeForUse(HValue* value,
HValue* use_value,
int use_index,
Representation to);
void InsertRepresentationChangesForValue(HValue* value);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_


@ -1,40 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-sce.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void HStackCheckEliminationPhase::Run() {
// For each loop block walk the dominator tree from the backwards branch to
// the loop header. If a call instruction is encountered the backwards branch
// is dominated by a call and the stack check in the backwards branch can be
// removed.
for (int i = 0; i < graph()->blocks()->length(); i++) {
HBasicBlock* block = graph()->blocks()->at(i);
if (block->IsLoopHeader()) {
HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
HBasicBlock* dominator = back_edge;
while (true) {
for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
if (it.Current()->HasStackCheck()) {
block->loop_information()->stack_check()->Eliminate();
break;
}
}
// Done when the loop header is processed.
if (dominator == block) break;
// Move up the dominator tree.
dominator = dominator->dominator();
}
}
}
}
} // namespace internal
} // namespace v8
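In other words: if every path from the back edge up to the loop header passes through a call, each iteration already hits the callee's own stack check, so the one in the header is redundant. A small sketch of exactly that dominator walk (Block and BackEdgeDominatedByCall are invented for the example):

#include <cstdio>

// Toy block: its immediate dominator plus whether it contains an instruction
// that already carries a stack check (a call, in practice).
struct Block {
  Block* dominator;
  bool has_call;
};

// Walk the dominator chain from the back edge up to the loop header. If any
// block on the way contains a call, every iteration performs a stack check
// anyway and the one in the header can be eliminated.
bool BackEdgeDominatedByCall(Block* header, Block* back_edge) {
  for (Block* b = back_edge;; b = b->dominator) {
    if (b->has_call) return true;
    if (b == header) return false;  // header reached without finding a call
  }
}

int main() {
  Block header = {nullptr, false};
  Block body = {&header, true};   // the loop body contains a call
  Block latch = {&body, false};   // the block with the back edge
  std::printf("stack check removable: %d\n",
              BackEdgeDominatedByCall(&header, &latch));
  return 0;
}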


@ -1,26 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_SCE_H_
#define V8_CRANKSHAFT_HYDROGEN_SCE_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
class HStackCheckEliminationPhase : public HPhase {
public:
explicit HStackCheckEliminationPhase(HGraph* graph)
: HPhase("H_Stack check elimination", graph) { }
void Run();
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_SCE_H_


@ -1,122 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-store-elimination.h"
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x
// Performs a block-by-block local analysis for removable stores.
void HStoreEliminationPhase::Run() {
GVNFlagSet flags; // Use GVN flags as an approximation for some instructions.
flags.RemoveAll();
flags.Add(kArrayElements);
flags.Add(kArrayLengths);
flags.Add(kStringLengths);
flags.Add(kBackingStoreFields);
flags.Add(kDoubleArrayElements);
flags.Add(kDoubleFields);
flags.Add(kElementsPointer);
flags.Add(kInobjectFields);
flags.Add(kExternalMemory);
flags.Add(kStringChars);
flags.Add(kTypedArrayElements);
for (int i = 0; i < graph()->blocks()->length(); i++) {
unobserved_.Rewind(0);
HBasicBlock* block = graph()->blocks()->at(i);
if (!block->IsReachable()) continue;
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
if (instr->CheckFlag(HValue::kIsDead)) continue;
switch (instr->opcode()) {
case HValue::kStoreNamedField:
// Remove any unobserved stores overwritten by this store.
ProcessStore(HStoreNamedField::cast(instr));
break;
case HValue::kLoadNamedField:
// Observe any unobserved stores on this object + field.
ProcessLoad(HLoadNamedField::cast(instr));
break;
default:
ProcessInstr(instr, flags);
break;
}
}
}
}
void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
HValue* object = store->object()->ActualValue();
int i = 0;
while (i < unobserved_.length()) {
HStoreNamedField* prev = unobserved_.at(i);
if (aliasing_->MustAlias(object, prev->object()->ActualValue()) &&
prev->CanBeReplacedWith(store)) {
// This store is guaranteed to overwrite the previous store.
prev->DeleteAndReplaceWith(NULL);
TRACE(("++ Unobserved store S%d overwritten by S%d\n",
prev->id(), store->id()));
unobserved_.Remove(i);
} else {
i++;
}
}
// Only non-transitioning stores are removable.
if (!store->has_transition()) {
TRACE(("-- Might remove store S%d\n", store->id()));
unobserved_.Add(store, zone());
}
}
void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) {
HValue* object = load->object()->ActualValue();
int i = 0;
while (i < unobserved_.length()) {
HStoreNamedField* prev = unobserved_.at(i);
if (aliasing_->MayAlias(object, prev->object()->ActualValue()) &&
load->access().Equals(prev->access())) {
TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id()));
unobserved_.Remove(i);
} else {
i++;
}
}
}
void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
GVNFlagSet flags) {
if (unobserved_.length() == 0) return; // Nothing to do.
if (instr->CanDeoptimize()) {
TRACE(("-- Observed stores at I%d (%s might deoptimize)\n",
instr->id(), instr->Mnemonic()));
unobserved_.Rewind(0);
return;
}
if (instr->CheckChangesFlag(kNewSpacePromotion)) {
TRACE(("-- Observed stores at I%d (%s might GC)\n",
instr->id(), instr->Mnemonic()));
unobserved_.Rewind(0);
return;
}
if (instr->DependsOnFlags().ContainsAnyOf(flags)) {
TRACE(("-- Observed stores at I%d (GVN flags of %s)\n",
instr->id(), instr->Mnemonic()));
unobserved_.Rewind(0);
return;
}
}
} // namespace internal
} // namespace v8
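A store is only removable if it is overwritten before anything can observe it; loads, potential deopts and GC points all count as observers. Below is a stripped-down sketch of the unobserved-store bookkeeping, ignoring V8's alias analysis and GVN flags (Store, StoreEliminator and the field strings are invented for the example):

#include <cstdio>
#include <string>
#include <vector>

struct Store { int object; std::string field; bool removed; };

struct StoreEliminator {
  std::vector<Store*> unobserved;

  void ProcessStore(Store* store) {
    for (size_t i = 0; i < unobserved.size();) {
      Store* prev = unobserved[i];
      if (prev->object == store->object && prev->field == store->field) {
        prev->removed = true;                      // overwritten before any read
        unobserved.erase(unobserved.begin() + i);
      } else {
        ++i;
      }
    }
    unobserved.push_back(store);
  }

  void ProcessLoad(int object, const std::string& field) {
    for (size_t i = 0; i < unobserved.size();) {
      Store* prev = unobserved[i];
      if (prev->object == object && prev->field == field) {
        unobserved.erase(unobserved.begin() + i);  // now observed, must stay
      } else {
        ++i;
      }
    }
  }

  void ProcessSideEffect() { unobserved.clear(); } // deopt/GC observes all
};

int main() {
  Store s1 = {1, "x", false}, s2 = {1, "x", false}, s3 = {2, "y", false};
  StoreEliminator elim;
  elim.ProcessStore(&s1);
  elim.ProcessStore(&s3);
  elim.ProcessStore(&s2);         // overwrites s1 with no load in between
  elim.ProcessLoad(1, "x");       // observes s2, so s2 must stay
  std::printf("s1 removed: %d, s2 removed: %d, s3 removed: %d\n",
              s1.removed, s2.removed, s3.removed);
  return 0;
}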


@ -1,35 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
#define V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/hydrogen-alias-analysis.h"
namespace v8 {
namespace internal {
class HStoreEliminationPhase : public HPhase {
public:
explicit HStoreEliminationPhase(HGraph* graph)
: HPhase("H_Store elimination", graph),
unobserved_(10, zone()),
aliasing_() { }
void Run();
private:
ZoneList<HStoreNamedField*> unobserved_;
HAliasAnalyzer* aliasing_;
void ProcessStore(HStoreNamedField* store);
void ProcessLoad(HLoadNamedField* load);
void ProcessInstr(HInstruction* instr, GVNFlagSet flags);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_


@ -1,75 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-types.h"
#include "src/field-type.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
namespace internal {
// static
HType HType::FromType(AstType* type) {
if (AstType::Any()->Is(type)) return HType::Any();
if (!type->IsInhabited()) return HType::None();
if (type->Is(AstType::SignedSmall())) return HType::Smi();
if (type->Is(AstType::Number())) return HType::TaggedNumber();
if (type->Is(AstType::Null())) return HType::Null();
if (type->Is(AstType::String())) return HType::String();
if (type->Is(AstType::Boolean())) return HType::Boolean();
if (type->Is(AstType::Undefined())) return HType::Undefined();
if (type->Is(AstType::Object())) return HType::JSObject();
if (type->Is(AstType::DetectableReceiver())) return HType::JSReceiver();
return HType::Tagged();
}
// static
HType HType::FromFieldType(Handle<FieldType> type, Zone* temp_zone) {
return FromType(type->Convert(temp_zone));
}
// static
HType HType::FromValue(Handle<Object> value) {
Object* raw_value = *value;
if (raw_value->IsSmi()) return HType::Smi();
DCHECK(raw_value->IsHeapObject());
Isolate* isolate = HeapObject::cast(*value)->GetIsolate();
if (raw_value->IsNull(isolate)) return HType::Null();
if (raw_value->IsHeapNumber()) {
double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
}
if (raw_value->IsString()) return HType::String();
if (raw_value->IsBoolean()) return HType::Boolean();
if (raw_value->IsUndefined(isolate)) return HType::Undefined();
if (raw_value->IsJSArray()) {
DCHECK(!raw_value->IsUndetectable());
return HType::JSArray();
}
if (raw_value->IsJSObject() && !raw_value->IsUndetectable()) {
return HType::JSObject();
}
return HType::HeapObject();
}
std::ostream& operator<<(std::ostream& os, const HType& t) {
// Note: The c1visualizer syntax for locals allows only a sequence of the
// following characters: A-Za-z0-9_-|:
switch (t.kind_) {
#define DEFINE_CASE(Name, mask) \
case HType::k##Name: \
return os << #Name;
HTYPE_LIST(DEFINE_CASE)
#undef DEFINE_CASE
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8


@ -1,95 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_TYPES_H_
#define V8_CRANKSHAFT_HYDROGEN_TYPES_H_
#include <climits>
#include <iosfwd>
#include "src/ast/ast-types.h"
#include "src/base/macros.h"
namespace v8 {
namespace internal {
// Forward declarations.
template <typename T> class Handle;
class FieldType;
class Object;
#define HTYPE_LIST(V) \
V(Any, 0x0) /* 0000 0000 0000 0000 */ \
V(Tagged, 0x1) /* 0000 0000 0000 0001 */ \
V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \
V(TaggedNumber, 0xd) /* 0000 0000 0000 1101 */ \
V(Smi, 0x1d) /* 0000 0000 0001 1101 */ \
V(HeapObject, 0x21) /* 0000 0000 0010 0001 */ \
V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
V(Null, 0x27) /* 0000 0000 0010 0111 */ \
V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
V(String, 0x65) /* 0000 0000 0110 0101 */ \
V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
V(JSReceiver, 0x221) /* 0000 0010 0010 0001 */ \
V(JSObject, 0x621) /* 0000 0110 0010 0001 */ \
V(JSArray, 0xe21) /* 0000 1110 0010 0001 */ \
V(None, 0xfff) /* 0000 1111 1111 1111 */
class HType final {
public:
#define DECLARE_CONSTRUCTOR(Name, mask) \
static HType Name() WARN_UNUSED_RESULT { return HType(k##Name); }
HTYPE_LIST(DECLARE_CONSTRUCTOR)
#undef DECLARE_CONSTRUCTOR
// Return the weakest (least precise) common type.
HType Combine(HType other) const WARN_UNUSED_RESULT {
return HType(static_cast<Kind>(kind_ & other.kind_));
}
bool Equals(HType other) const WARN_UNUSED_RESULT {
return kind_ == other.kind_;
}
bool IsSubtypeOf(HType other) const WARN_UNUSED_RESULT {
return Combine(other).Equals(other);
}
#define DECLARE_IS_TYPE(Name, mask) \
bool Is##Name() const WARN_UNUSED_RESULT { \
return IsSubtypeOf(HType::Name()); \
}
HTYPE_LIST(DECLARE_IS_TYPE)
#undef DECLARE_IS_TYPE
static HType FromType(AstType* type) WARN_UNUSED_RESULT;
static HType FromFieldType(Handle<FieldType> type,
Zone* temp_zone) WARN_UNUSED_RESULT;
static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
friend std::ostream& operator<<(std::ostream& os, const HType& t);
private:
enum Kind {
#define DECLARE_TYPE(Name, mask) k##Name = mask,
HTYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
LAST_KIND = kNone
};
// Make sure type fits in int16.
STATIC_ASSERT(LAST_KIND < (1 << (CHAR_BIT * sizeof(int16_t))));
explicit HType(Kind kind) : kind_(kind) { }
int16_t kind_;
};
std::ostream& operator<<(std::ostream& os, const HType& t);
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_TYPES_H_
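The masks above encode the subtype lattice directly in bits: more bits set means a more precise type, Combine is a bitwise AND, and IsSubtypeOf(x, y) holds exactly when x's bits are a superset of y's. A tiny standalone check of that reading, with the constants copied from HTYPE_LIST (nothing else of HType is reproduced; this is only a sketch):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Masks copied from HTYPE_LIST above.
enum HTypeKind : uint16_t {
  kAny = 0x0,
  kTaggedNumber = 0xd,
  kSmi = 0x1d,
  kHeapNumber = 0x2d,
  kString = 0x65,
  kNone = 0xfff,
};

// Weakest common type: intersect the bit sets, exactly like HType::Combine.
constexpr uint16_t Combine(uint16_t a, uint16_t b) { return a & b; }

// x is a subtype of y iff combining them loses nothing from y's perspective.
constexpr bool IsSubtypeOf(uint16_t x, uint16_t y) { return Combine(x, y) == y; }

int main() {
  assert(Combine(kSmi, kHeapNumber) == kTaggedNumber);  // 0x1d & 0x2d == 0xd
  assert(IsSubtypeOf(kSmi, kTaggedNumber));
  assert(!IsSubtypeOf(kTaggedNumber, kSmi));
  assert(IsSubtypeOf(kNone, kString));   // None is a subtype of everything
  assert(IsSubtypeOf(kString, kAny));    // everything is a subtype of Any
  std::printf("HType lattice checks passed\n");
  return 0;
}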


@ -1,238 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/hydrogen-uint32-analysis.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
static bool IsUnsignedLoad(HLoadKeyed* instr) {
switch (instr->elements_kind()) {
case UINT8_ELEMENTS:
case UINT16_ELEMENTS:
case UINT32_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
return true;
default:
return false;
}
}
static bool IsUint32Operation(HValue* instr) {
return instr->IsShr() ||
(instr->IsLoadKeyed() && IsUnsignedLoad(HLoadKeyed::cast(instr))) ||
(instr->IsInteger32Constant() && instr->GetInteger32Constant() >= 0);
}
bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
// Operations that operate on bits are safe.
if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
return true;
} else if (use->IsSimulate() || use->IsArgumentsObject()) {
// Deoptimization has special support for uint32.
return true;
} else if (use->IsChange()) {
// Conversions have special support for uint32.
// This DCHECK guards that the conversion in question is actually
// implemented. Do not extend the whitelist without adding
// support to LChunkBuilder::DoChange().
DCHECK(HChange::cast(use)->to().IsDouble() ||
HChange::cast(use)->to().IsSmi() ||
HChange::cast(use)->to().IsTagged());
return true;
} else if (use->IsStoreKeyed()) {
HStoreKeyed* store = HStoreKeyed::cast(use);
if (store->is_fixed_typed_array()) {
// Storing a value into an external integer array is a bit level
// operation.
if (store->value() == val) {
// Clamping or a conversion to double should have been inserted.
DCHECK(store->elements_kind() != UINT8_CLAMPED_ELEMENTS);
DCHECK(store->elements_kind() != FLOAT32_ELEMENTS);
DCHECK(store->elements_kind() != FLOAT64_ELEMENTS);
return true;
}
}
} else if (use->IsCompareNumericAndBranch()) {
HCompareNumericAndBranch* c = HCompareNumericAndBranch::cast(use);
return IsUint32Operation(c->left()) && IsUint32Operation(c->right());
}
return false;
}
// Iterate over all uses and verify that they are uint32 safe: either don't
// distinguish between int32 and uint32 due to their bitwise nature or
// have special support for uint32 values.
// Encountered phis are optimistically treated as safe uint32 uses,
// marked with kUint32 flag and collected in the phis_ list. A separate
// pass will be performed later by UnmarkUnsafePhis to clear kUint32 from
// phis that are not actually uint32-safe (it requires fix point iteration).
bool HUint32AnalysisPhase::Uint32UsesAreSafe(HValue* uint32val) {
bool collect_phi_uses = false;
for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->IsPhi()) {
if (!use->CheckFlag(HInstruction::kUint32)) {
// There is a phi use of this value from a phi that is not yet
// collected in phis_ array. Separate pass is required.
collect_phi_uses = true;
}
// Optimistically treat phis as uint32 safe.
continue;
}
if (!IsSafeUint32Use(uint32val, use)) {
return false;
}
}
if (collect_phi_uses) {
for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
// There is a phi use of this value from a phi that is not yet
// collected in phis_ array. Separate pass is required.
if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
use->SetFlag(HInstruction::kUint32);
phis_.Add(HPhi::cast(use), zone());
}
}
}
return true;
}
// Check if all operands to the given phi are marked with kUint32 flag.
bool HUint32AnalysisPhase::CheckPhiOperands(HPhi* phi) {
if (!phi->CheckFlag(HInstruction::kUint32)) {
// This phi is not uint32 safe. No need to check operands.
return false;
}
for (int j = 0; j < phi->OperandCount(); j++) {
HValue* operand = phi->OperandAt(j);
if (!operand->CheckFlag(HInstruction::kUint32)) {
// Lazily mark constants that fit into uint32 range with kUint32 flag.
if (operand->IsInteger32Constant() &&
operand->GetInteger32Constant() >= 0) {
operand->SetFlag(HInstruction::kUint32);
continue;
}
// This phi is not safe, some operands are not uint32 values.
return false;
}
}
return true;
}
// Remove kUint32 flag from the phi itself and its operands. If any operand
// was a phi marked with kUint32 place it into a worklist for
// transitive clearing of kUint32 flag.
void HUint32AnalysisPhase::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
phi->ClearFlag(HInstruction::kUint32);
for (int j = 0; j < phi->OperandCount(); j++) {
HValue* operand = phi->OperandAt(j);
if (operand->CheckFlag(HInstruction::kUint32)) {
operand->ClearFlag(HInstruction::kUint32);
if (operand->IsPhi()) {
worklist->Add(HPhi::cast(operand), zone());
}
}
}
}
void HUint32AnalysisPhase::UnmarkUnsafePhis() {
// No phis were collected. Nothing to do.
if (phis_.length() == 0) return;
// Worklist used to transitively clear kUint32 from phis that
// are used as arguments to other phis.
ZoneList<HPhi*> worklist(phis_.length(), zone());
// Phi can be used as a uint32 value if and only if
// all its operands are uint32 values and all its
// uses are uint32 safe.
// Iterate over collected phis and unmark those that
// are unsafe. When unmarking phi unmark its operands
// and add it to the worklist if it is a phi as well.
// Phis that are still marked as safe are shifted down
// so that all safe phis form a prefix of the phis_ array.
int phi_count = 0;
for (int i = 0; i < phis_.length(); i++) {
HPhi* phi = phis_[i];
if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
phis_[phi_count++] = phi;
} else {
UnmarkPhi(phi, &worklist);
}
}
// Now phis array contains only those phis that have safe
// non-phi uses. Start transitively clearing kUint32 flag
// from phi operands of discovered non-safe phis until
// only safe phis are left.
while (!worklist.is_empty()) {
while (!worklist.is_empty()) {
HPhi* phi = worklist.RemoveLast();
UnmarkPhi(phi, &worklist);
}
// Check if any operands to safe phis were unmarked
// turning a safe phi into unsafe. The same value
// can flow into several phis.
int new_phi_count = 0;
for (int i = 0; i < phi_count; i++) {
HPhi* phi = phis_[i];
if (CheckPhiOperands(phi)) {
phis_[new_phi_count++] = phi;
} else {
UnmarkPhi(phi, &worklist);
}
}
phi_count = new_phi_count;
}
}
void HUint32AnalysisPhase::Run() {
if (!graph()->has_uint32_instructions()) return;
ZoneList<HInstruction*>* uint32_instructions = graph()->uint32_instructions();
for (int i = 0; i < uint32_instructions->length(); ++i) {
// Analyze instruction and mark it with kUint32 if all
// its uses are uint32 safe.
HInstruction* current = uint32_instructions->at(i);
if (current->IsLinked() &&
current->representation().IsInteger32() &&
Uint32UsesAreSafe(current)) {
current->SetFlag(HInstruction::kUint32);
}
}
// Some phis might have been optimistically marked with kUint32 flag.
// Remove this flag from those phis that are unsafe and propagate
// this information transitively potentially clearing kUint32 flag
// from some non-phi operations that are used as operands to unsafe phis.
UnmarkUnsafePhis();
}
} // namespace internal
} // namespace v8
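The reason bitwise users are "safe" is worth seeing at the value level: the same 32-bit pattern only behaves differently once an operation looks at it as a number rather than as bits, which is exactly what IsSafeUint32Use has to whitelist. A small plain-C++ demonstration (assuming a two's complement target; nothing here is V8 API):

#include <cstdint>
#include <cstdio>

int main() {
  // e.g. the result of `x >>> 0` in JS for x = -1.
  uint32_t as_unsigned = 0xFFFFFFFFu;
  // Same bit pattern reinterpreted as a signed 32-bit value (-1 on two's
  // complement targets).
  int32_t as_signed = static_cast<int32_t>(as_unsigned);

  // Bit-level operations cannot tell the two interpretations apart.
  std::printf("masked: %08x vs %08x\n",
              static_cast<unsigned>(as_unsigned & 0xFFu),
              static_cast<unsigned>(static_cast<uint32_t>(as_signed) & 0xFFu));

  // Value-level operations can, which is why conversions, comparisons and
  // arithmetic need explicit uint32 support (or a deopt).
  std::printf("as double: %.1f vs %.1f\n",
              static_cast<double>(as_unsigned), static_cast<double>(as_signed));
  std::printf("halved: %u vs %d\n", static_cast<unsigned>(as_unsigned / 2),
              static_cast<int>(as_signed / 2));
  return 0;
}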

View File

@ -1,37 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
#define V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
#include "src/crankshaft/hydrogen.h"
namespace v8 {
namespace internal {
// Discover instructions that can be marked with kUint32 flag allowing
// them to produce full range uint32 values.
class HUint32AnalysisPhase : public HPhase {
public:
explicit HUint32AnalysisPhase(HGraph* graph)
: HPhase("H_Compute safe UInt32 operations", graph), phis_(4, zone()) { }
void Run();
private:
INLINE(bool IsSafeUint32Use(HValue* val, HValue* use));
INLINE(bool Uint32UsesAreSafe(HValue* uint32val));
INLINE(bool CheckPhiOperands(HPhi* phi));
INLINE(void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist));
INLINE(void UnmarkUnsafePhis());
ZoneList<HPhi*> phis_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large


@ -1,387 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
#include "src/ast/scopes.h"
#include "src/base/logging.h"
#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
#include "src/crankshaft/ia32/lithium-ia32.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Immediate ToImmediate(LOperand* op, const Representation& r) const {
return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
}
double ToDouble(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
void EnsureRelocSpaceForDeoptimization();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
Scope* scope() const { return scope_; }
XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true, Label* if_false,
Handle<String> class_name, Register input,
Register temporary, Register temporary2);
bool HasAllocatedStackSlots() const {
return chunk()->HasAllocatedStackSlots();
}
int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
int GetTotalFrameSlotCount() const {
return chunk()->GetTotalFrameSlotCount();
}
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) override;
void GenerateBodyInstructionPost(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* fun,
int argc,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, argc, instr);
}
void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, instr);
}
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
void LoadContextFromDeferred(LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
}
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
uint32_t base_offset);
Operand BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding);
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition cc);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
XMMRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
Label* is_not_string,
SmiCheck check_needed);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset,
AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt(int space_needed) override;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
void EmitReturn(LReturn* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
friend class LGapResolver;
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write an arbitrary value to each page in range
// esp + offset - page_size .. esp in turn.
void MakeSureStackPagesMapped(int offset);
#endif
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
DCHECK(codegen_->info()->is_calling());
}
~PushSafepointRegistersScope() {
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
codegen_->masm_->PopSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
Label done_;
int instruction_index_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_


@ -1,490 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
#include "src/register-configuration.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32, owner->zone()),
source_uses_(),
destination_uses_(),
spilled_register_(-1) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
DCHECK(HasBeenReset());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
PerformMove(i);
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
Finish();
DCHECK(HasBeenReset());
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) AddMove(move);
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph. We use operand swaps to resolve cycles,
// which means that a call to PerformMove could change any source operand
// in the move graph.
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does
// not miss any). Assume there is a non-blocking move with source A
// and this move is blocked on source B and there is a swap of A and
// B. Then A and B must be involved in the same cycle (or they would
// not be swapped). Since this move's destination is B and there is
// only a single incoming edge to an operand, this move must also be
// involved in the same cycle. In that case, the blocking move will
// be created but will be "pending" when we return from PerformMove.
PerformMove(i);
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// This move's source may have changed due to swaps to resolve cycles and
// so it may now be the last move in the cycle. If so remove it.
if (moves_[index].source()->Equals(destination)) {
RemoveMove(index);
return;
}
// The move may be blocked on a (at most one) pending move, in which case
// we have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
EmitSwap(index);
return;
}
}
// This move is not blocked.
EmitMove(index);
}
void LGapResolver::AddMove(LMoveOperands move) {
LOperand* source = move.source();
if (source->IsRegister()) ++source_uses_[source->index()];
LOperand* destination = move.destination();
if (destination->IsRegister()) ++destination_uses_[destination->index()];
moves_.Add(move, cgen_->zone());
}
void LGapResolver::RemoveMove(int index) {
LOperand* source = moves_[index].source();
if (source->IsRegister()) {
--source_uses_[source->index()];
DCHECK(source_uses_[source->index()] >= 0);
}
LOperand* destination = moves_[index].destination();
if (destination->IsRegister()) {
--destination_uses_[destination->index()];
DCHECK(destination_uses_[destination->index()] >= 0);
}
moves_[index].Eliminate();
}
int LGapResolver::CountSourceUses(LOperand* operand) {
int count = 0;
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
++count;
}
}
return count;
}
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : reg.code();
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
code != skip_index) {
return Register::from_code(code);
}
}
return no_reg;
}
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] != 0) return false;
if (destination_uses_[code] != 0) return false;
}
return true;
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Finish() {
if (spilled_register_ >= 0) {
__ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
moves_.Rewind(0);
}
void LGapResolver::EnsureRestored(LOperand* operand) {
if (operand->IsRegister() && operand->index() == spilled_register_) {
__ pop(Register::from_code(spilled_register_));
spilled_register_ = -1;
}
}
Register LGapResolver::EnsureTempRegister() {
// 1. We may have already spilled to create a temp register.
if (spilled_register_ >= 0) {
return Register::from_code(spilled_register_);
}
// 2. We may have a free register that we can use without spilling.
Register free = GetFreeRegisterNot(no_reg);
if (!free.is(no_reg)) return free;
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
Register scratch = Register::from_code(code);
__ push(scratch);
spilled_register_ = code;
return scratch;
}
}
// 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
spilled_register_ = config->GetAllocatableGeneralCode(0);
Register scratch = Register::from_code(spilled_register_);
__ push(scratch);
return scratch;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
EnsureRestored(source);
EnsureRestored(destination);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = cgen_->ToRegister(source);
Operand dst = cgen_->ToOperand(destination);
__ mov(dst, src);
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ mov(dst, src);
} else {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
Register tmp = EnsureTempRegister();
Operand dst = cgen_->ToOperand(destination);
__ mov(tmp, src);
__ mov(dst, tmp);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
if (int_val == 0) {
__ xorps(dst, dst);
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
__ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
DCHECK(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ Move(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
__ mov(dst, tmp);
}
}
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movaps(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movsd(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
__ movsd(dst, xmm0);
}
} else {
UNREACHABLE();
}
RemoveMove(index);
}
void LGapResolver::EmitSwap(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
EnsureRestored(source);
EnsureRestored(destination);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
__ push(src);
__ mov(src, dst);
__ pop(dst);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
// Register-memory. Use a free register as a temp if possible. Do not
// spill on demand because the simple spill implementation cannot avoid
// spilling src at this point.
Register tmp = GetFreeRegisterNot(no_reg);
Register reg =
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
if (tmp.is(no_reg)) {
__ xor_(reg, mem);
__ xor_(mem, reg);
__ xor_(reg, mem);
} else {
__ mov(tmp, mem);
__ mov(mem, reg);
__ mov(reg, tmp);
}
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory. Spill on demand to use a temporary. If there is a
// free register after that, use it as a second temporary.
Register tmp0 = EnsureTempRegister();
Register tmp1 = GetFreeRegisterNot(tmp0);
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
if (tmp1.is(no_reg)) {
// Only one temp register available to us.
__ mov(tmp0, dst);
__ xor_(tmp0, src);
__ xor_(src, tmp0);
__ xor_(tmp0, src);
__ mov(dst, tmp0);
} else {
__ mov(tmp0, dst);
__ mov(tmp1, src);
__ mov(dst, tmp1);
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movaps(xmm0, src);
__ movaps(src, dst);
__ movaps(dst, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
? source
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
__ movsd(xmm0, other);
__ movsd(other, reg);
__ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
// Double-width memory-to-memory. Spill on demand to use a general
// purpose temporary register and also rely on having xmm0 available as
// a fixed scratch register.
Register tmp = EnsureTempRegister();
Operand src0 = cgen_->ToOperand(source);
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
__ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
__ movsd(src0, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
}
// The swap of source and destination has executed a move from source to
// destination.
RemoveMove(index);
  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(source)) {
moves_[i].set_source(destination);
} else if (other_move.Blocks(destination)) {
moves_[i].set_source(source);
}
}
// In addition to swapping the actual uses as sources, we need to update
// the use counts.
if (source->IsRegister() && destination->IsRegister()) {
int temp = source_uses_[source->index()];
source_uses_[source->index()] = source_uses_[destination->index()];
source_uses_[destination->index()] = temp;
} else if (source->IsRegister()) {
// We don't have use counts for non-register operands like destination.
// Compute those counts now.
source_uses_[source->index()] = CountSourceUses(source);
} else if (destination->IsRegister()) {
source_uses_[destination->index()] = CountSourceUses(destination);
}
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_IA32
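
When EmitSwap above has no free temporary register, it exchanges a register with a memory operand through three XOR instructions. A minimal standalone sketch of that exchange in plain C++ (the XorSwap name and the uint32_t operands are illustrative only, not part of the removed file):

#include <cassert>
#include <cstdint>

// Three-XOR exchange, mirroring the xor_/xor_/xor_ sequence EmitSwap emits
// when GetFreeRegisterNot returns no_reg. The two locations must be distinct;
// if they aliased, the sequence would zero both.
void XorSwap(uint32_t& reg, uint32_t& mem) {
  reg ^= mem;
  mem ^= reg;
  reg ^= mem;
}

int main() {
  uint32_t a = 0x1234, b = 0xABCD;
  XorSwap(a, b);
  assert(a == 0xABCD && b == 0x1234);
  return 0;
}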

View File

@ -1,86 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#define V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver final BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// Emit any code necessary at the end of a gap move.
void Finish();
// Add or delete a move from the move graph without emitting any code.
// Used to build up the graph and remove trivial moves.
void AddMove(LMoveOperands move);
void RemoveMove(int index);
// Report the count of uses of operand as a source in a not-yet-performed
// move. Used to rebuild use counts.
int CountSourceUses(LOperand* operand);
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Execute a move by emitting a swap of two operands. The move from
// source to destination is removed from the move graph.
void EmitSwap(int index);
// Ensure that the given operand is not spilled.
void EnsureRestored(LOperand* operand);
// Return a register that can be used as a temp register, spilling
// something if necessary.
Register EnsureTempRegister();
// Return a known free register different from the given one (which could
// be no_reg---returning any free register), or no_reg if there is no such
// register.
Register GetFreeRegisterNot(Register reg);
// Verify that the state is the initial one, ready to resolve a single
// parallel move.
bool HasBeenReset();
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
int source_uses_[Register::kNumRegisters];
int destination_uses_[DoubleRegister::kMaxNumRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
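
The resolver declared above breaks cyclic parallel moves by marking a move pending, recursing into the moves that read its destination, and emitting a swap when the recursion comes back around. A simplified sketch of that recursion over plain integer locations, assuming (as the real resolver does) that each location is the destination of at most one move; all names here are illustrative, not the original API:

#include <cassert>
#include <cstddef>
#include <functional>
#include <utility>
#include <vector>

// One pending "dst <- src" copy between abstract locations.
struct Move {
  int src;
  int dst;
  bool pending = false;
  bool done = false;
};

// Perform all moves so every source is read before it is overwritten,
// breaking cycles with a swap, in the spirit of PerformMove and EmitSwap.
void Resolve(std::vector<Move>& moves, std::vector<int>& state) {
  std::function<void(size_t)> perform = [&](size_t i) {
    moves[i].pending = true;
    // First perform every non-pending move that reads our destination.
    for (size_t j = 0; j < moves.size(); ++j) {
      if (j != i && !moves[j].done && !moves[j].pending &&
          moves[j].src == moves[i].dst) {
        perform(j);
      }
    }
    moves[i].pending = false;
    // A blocker that is still pending means we closed a cycle: swap.
    for (size_t j = 0; j < moves.size(); ++j) {
      if (!moves[j].done && moves[j].pending && moves[j].src == moves[i].dst) {
        std::swap(state[moves[i].src], state[moves[i].dst]);
        moves[i].done = true;
        // Remaining moves reading either swapped location must now read the
        // other one (cf. the retargeting loop at the end of EmitSwap).
        for (Move& m : moves) {
          if (m.done) continue;
          if (m.src == moves[i].src) m.src = moves[i].dst;
          else if (m.src == moves[i].dst) m.src = moves[i].src;
        }
        return;
      }
    }
    state[moves[i].dst] = state[moves[i].src];  // Plain move, no cycle.
    moves[i].done = true;
  };
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) perform(i);
  }
}

int main() {
  std::vector<int> state = {10, 20};           // locations 0 and 1
  std::vector<Move> moves = {{0, 1}, {1, 0}};  // a classic two-move cycle
  Resolve(moves, state);
  assert(state[0] == 20 && state[1] == 10);
  return 0;
}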

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,62 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
#include "src/crankshaft/lithium-allocator.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
namespace v8 {
namespace internal {
bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
LInstruction* LAllocator::InstructionAt(int index) {
return chunk_->instructions()->at(index);
}
LGap* LAllocator::GapAt(int index) {
return chunk_->GetGapAt(index);
}
void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
if (range->Kind() == DOUBLE_REGISTERS) {
assigned_double_registers_->Add(reg);
} else {
DCHECK(range->Kind() == GENERAL_REGISTERS);
assigned_registers_->Add(reg);
}
range->set_assigned_register(reg, chunk()->zone());
}
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_

File diff suppressed because it is too large

View File

@ -1,576 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#include "src/allocation.h"
#include "src/base/compiler-specific.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/lithium.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
// Forward declarations.
class HBasicBlock;
class HGraph;
class HPhi;
class HTracer;
class HValue;
class BitVector;
class StringStream;
class LPlatformChunk;
class LOperand;
class LUnallocated;
class LGap;
class LParallelMove;
class LPointerMap;
// This class represents a single point of a LOperand's lifetime.
// For each lithium instruction there are exactly two lifetime positions:
// the beginning and the end of the instruction. Lifetime positions for
// different lithium instructions are disjoint.
class LifetimePosition {
public:
// Return the lifetime position that corresponds to the beginning of
// the instruction with the given index.
static LifetimePosition FromInstructionIndex(int index) {
return LifetimePosition(index * kStep);
}
// Returns a numeric representation of this lifetime position.
int Value() const {
return value_;
}
// Returns the index of the instruction to which this lifetime position
// corresponds.
int InstructionIndex() const {
DCHECK(IsValid());
return value_ / kStep;
}
// Returns true if this lifetime position corresponds to the instruction
// start.
bool IsInstructionStart() const {
return (value_ & (kStep - 1)) == 0;
}
// Returns the lifetime position for the start of the instruction which
// corresponds to this lifetime position.
LifetimePosition InstructionStart() const {
DCHECK(IsValid());
return LifetimePosition(value_ & ~(kStep - 1));
}
// Returns the lifetime position for the end of the instruction which
// corresponds to this lifetime position.
LifetimePosition InstructionEnd() const {
DCHECK(IsValid());
return LifetimePosition(InstructionStart().Value() + kStep/2);
}
// Returns the lifetime position for the beginning of the next instruction.
LifetimePosition NextInstruction() const {
DCHECK(IsValid());
return LifetimePosition(InstructionStart().Value() + kStep);
}
// Returns the lifetime position for the beginning of the previous
// instruction.
LifetimePosition PrevInstruction() const {
DCHECK(IsValid());
DCHECK(value_ > 1);
return LifetimePosition(InstructionStart().Value() - kStep);
}
// Constructs the lifetime position which does not correspond to any
// instruction.
LifetimePosition() : value_(-1) {}
  // Returns true if this lifetime position corresponds to some
  // instruction.
bool IsValid() const { return value_ != -1; }
static inline LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition MaxPosition() {
    // We have to use this kind of getter instead of a static member due to
    // a crash bug in GDB.
return LifetimePosition(kMaxInt);
}
private:
static const int kStep = 2;
// Code relies on kStep being a power of two.
STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
explicit LifetimePosition(int value) : value_(value) { }
int value_;
};
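
With kStep = 2, every instruction index i owns the even position 2 * i (its start) and the odd position 2 * i + 1 (its end). A few standalone assertions checking that arithmetic (a sketch, not V8 code):

#include <cassert>

int main() {
  const int kStep = 2;
  const int index = 7;                 // instruction index
  int start = index * kStep;           // FromInstructionIndex
  assert((start & (kStep - 1)) == 0);  // IsInstructionStart
  assert(start / kStep == index);      // InstructionIndex
  int end = start + kStep / 2;         // InstructionEnd
  assert(end == start + 1);
  int next = start + kStep;            // NextInstruction
  assert(next / kStep == index + 1);
  return 0;
}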
// Representation of the non-empty interval [start,end[.
class UseInterval: public ZoneObject {
public:
UseInterval(LifetimePosition start, LifetimePosition end)
: start_(start), end_(end), next_(NULL) {
DCHECK(start.Value() < end.Value());
}
LifetimePosition start() const { return start_; }
LifetimePosition end() const { return end_; }
UseInterval* next() const { return next_; }
  // Split this interval at the given position without affecting the
// live range that owns it. The interval must contain the position.
void SplitAt(LifetimePosition pos, Zone* zone);
  // If this interval intersects with other, return the smallest position
// that belongs to both of them.
LifetimePosition Intersect(const UseInterval* other) const {
if (other->start().Value() < start_.Value()) return other->Intersect(this);
if (other->start().Value() < end_.Value()) return other->start();
return LifetimePosition::Invalid();
}
bool Contains(LifetimePosition point) const {
return start_.Value() <= point.Value() && point.Value() < end_.Value();
}
private:
void set_start(LifetimePosition start) { start_ = start; }
void set_next(UseInterval* next) { next_ = next; }
LifetimePosition start_;
LifetimePosition end_;
UseInterval* next_;
friend class LiveRange; // Assigns to start_.
};
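
Intersect above returns the earliest position contained in both half-open intervals, or an invalid position when they are disjoint. The same computation on plain ints, with std::optional standing in for LifetimePosition::Invalid (a sketch, not the original API):

#include <cassert>
#include <optional>

// Earliest point contained in both [s1, e1) and [s2, e2), if any.
std::optional<int> Intersect(int s1, int e1, int s2, int e2) {
  if (s2 < s1) return Intersect(s2, e2, s1, e1);  // normalize: first starts first
  if (s2 < e1) return s2;                         // second starts inside first
  return std::nullopt;                            // disjoint
}

int main() {
  assert(Intersect(2, 8, 5, 12).value() == 5);
  assert(Intersect(5, 12, 2, 8).value() == 5);
  assert(!Intersect(2, 5, 5, 9).has_value());  // [2,5) and [5,9) only touch
  return 0;
}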
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
LifetimePosition pos() const { return pos_; }
UsePosition* next() const { return next_; }
private:
void set_next(UsePosition* next) { next_ = next; }
LOperand* const operand_;
LOperand* const hint_;
LifetimePosition const pos_;
UsePosition* next_;
bool requires_reg_;
bool register_beneficial_;
friend class LiveRange;
};
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
class LiveRange: public ZoneObject {
public:
static const int kInvalidAssignment = 0x7fffffff;
LiveRange(int id, Zone* zone);
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
LiveRange* parent() const { return parent_; }
LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
LiveRange* next() const { return next_; }
bool IsChild() const { return parent() != NULL; }
int id() const { return id_; }
bool IsFixed() const { return id_ < 0; }
bool IsEmpty() const { return first_interval() == NULL; }
LOperand* CreateAssignedOperand(Zone* zone);
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
void set_assigned_register(int reg, Zone* zone);
void MakeSpilled(Zone* zone);
// Returns use position in this live range that follows both start
// and last processed use position.
// Modifies internal state of live range!
UsePosition* NextUsePosition(LifetimePosition start);
// Returns use position for which register is required in this live
// range and which follows both start and last processed use position
// Modifies internal state of live range!
UsePosition* NextRegisterPosition(LifetimePosition start);
// Returns use position for which register is beneficial in this live
// range and which follows both start and last processed use position
// Modifies internal state of live range!
UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
// Returns use position for which register is beneficial in this live
// range and which precedes start.
UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos);
// Split this live range at the given position which must follow the start of
// the range.
// All uses following the given position will be moved from this
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
RegisterKind Kind() const { return kind_; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
bool IsSpilled() const { return spilled_; }
LOperand* current_hint_operand() const {
DCHECK(current_hint_operand_ == FirstHint());
return current_hint_operand_;
}
LOperand* FirstHint() const {
UsePosition* pos = first_pos_;
while (pos != NULL && !pos->HasHint()) pos = pos->next();
if (pos != NULL) return pos->hint();
return NULL;
}
LifetimePosition Start() const {
DCHECK(!IsEmpty());
return first_interval()->start();
}
LifetimePosition End() const {
DCHECK(!IsEmpty());
return last_interval_->end();
}
bool HasAllocatedSpillOperand() const;
LOperand* GetSpillOperand() const { return spill_operand_; }
void SetSpillOperand(LOperand* operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
}
bool ShouldBeAllocatedBefore(const LiveRange* other) const;
bool CanCover(LifetimePosition position) const;
bool Covers(LifetimePosition position);
LifetimePosition FirstIntersection(LiveRange* other);
// Add a new interval or a new use position to this live range.
void EnsureInterval(LifetimePosition start,
LifetimePosition end,
Zone* zone);
void AddUseInterval(LifetimePosition start,
LifetimePosition end,
Zone* zone);
void AddUsePosition(LifetimePosition pos,
LOperand* operand,
LOperand* hint,
Zone* zone);
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start);
#ifdef DEBUG
// True if target overlaps an existing interval.
bool HasOverlap(UseInterval* target) const;
void Verify() const;
#endif
private:
void ConvertOperands(Zone* zone);
UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
void AdvanceLastProcessedMarker(UseInterval* to_start_of,
LifetimePosition but_not_past) const;
int id_;
bool spilled_;
RegisterKind kind_;
int assigned_register_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
LiveRange* parent_;
LiveRange* next_;
// This is used as a cache, it doesn't affect correctness.
mutable UseInterval* current_interval_;
UsePosition* last_processed_use_;
// This is used as a cache, it's invalid outside of BuildLiveRanges.
LOperand* current_hint_operand_;
LOperand* spill_operand_;
int spill_start_index_;
friend class LAllocator; // Assigns to kind_.
};
class LAllocator BASE_EMBEDDED {
public:
LAllocator(int first_virtual_register, HGraph* graph);
static PRINTF_FORMAT(1, 2) void TraceAlloc(const char* msg, ...);
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
bool Allocate(LChunk* chunk);
const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
const Vector<LiveRange*>* fixed_live_ranges() const {
return &fixed_live_ranges_;
}
const Vector<LiveRange*>* fixed_double_live_ranges() const {
return &fixed_double_live_ranges_;
}
LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
Zone* zone() { return &zone_; }
int GetVirtualRegister() {
if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
allocation_ok_ = false;
// Maintain the invariant that we return something below the maximum.
return 0;
}
return next_virtual_register_++;
}
bool AllocationOk() { return allocation_ok_; }
void MarkAsOsrEntry() {
// There can be only one.
DCHECK(!has_osr_entry_);
// Simply set a flag to find and process instruction later.
has_osr_entry_ = true;
}
#ifdef DEBUG
void Verify() const;
#endif
BitVector* assigned_registers() {
return assigned_registers_;
}
BitVector* assigned_double_registers() {
return assigned_double_registers_;
}
private:
void MeetRegisterConstraints();
void ResolvePhis();
void BuildLiveRanges();
void AllocateGeneralRegisters();
void AllocateDoubleRegisters();
void ConnectRanges();
void ResolveControlFlow();
void PopulatePointerMaps();
void AllocateRegisters();
bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
inline bool SafePointsAreInOrder() const;
// Liveness analysis support.
void InitializeLivenessAnalysis();
BitVector* ComputeLiveOut(HBasicBlock* block);
void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
void ProcessInstructions(HBasicBlock* block, BitVector* live);
void MeetRegisterConstraints(HBasicBlock* block);
void MeetConstraintsBetween(LInstruction* first,
LInstruction* second,
int gap_index);
void ResolvePhis(HBasicBlock* block);
// Helper methods for building intervals.
LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
LiveRange* LiveRangeFor(LOperand* operand);
void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
void Use(LifetimePosition block_start,
LifetimePosition position,
LOperand* operand,
LOperand* hint);
void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
  // Helper methods for updating the live range lists.
void AddToActive(LiveRange* range);
void AddToInactive(LiveRange* range);
void AddToUnhandledSorted(LiveRange* range);
void AddToUnhandledUnsorted(LiveRange* range);
void SortUnhandled();
bool UnhandledIsSorted();
void ActiveToHandled(LiveRange* range);
void ActiveToInactive(LiveRange* range);
void InactiveToHandled(LiveRange* range);
void InactiveToActive(LiveRange* range);
void FreeSpillSlot(LiveRange* range);
LOperand* TryReuseSpillSlot(LiveRange* range);
// Helper methods for allocating registers.
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
// Live range splitting helpers.
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
// Otherwise returns the live range that starts at pos and contains
// all uses from the original range that follow pos. Uses at pos will
// still be owned by the original range after splitting.
LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
// Split the given range in a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
// Find a lifetime position in the interval [start, end] which
  // is optimal for splitting: it is either the header of the outermost
// loop covered by this interval or the latest possible position.
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
  // Spill the given live range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
  // Spill the given live range after position [start] and up to position [end].
void SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
  // Spill the given live range after position [start] and up to position [end].
// Range is guaranteed to be spilled at least until position [until].
void SpillBetweenUntil(LiveRange* range,
LifetimePosition start,
LifetimePosition until,
LifetimePosition end);
void SplitAndSpillIntersecting(LiveRange* range);
// If we are trying to spill a range inside the loop try to
// hoist spill position out to the point just before the loop.
LifetimePosition FindOptimalSpillingPos(LiveRange* range,
LifetimePosition pos);
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
// Helper methods for resolving control flow.
void ResolveControlFlow(LiveRange* range,
HBasicBlock* block,
HBasicBlock* pred);
inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
// Return parallel move that should be used to connect ranges split at the
// given position.
LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
  // Return the block which contains the given lifetime position.
HBasicBlock* GetBlock(LifetimePosition pos);
// Helper methods for the fixed registers.
int RegisterCount() const;
static int FixedLiveRangeID(int index) { return -index - 1; }
static int FixedDoubleLiveRangeID(int index);
LiveRange* FixedLiveRangeFor(int index);
LiveRange* FixedDoubleLiveRangeFor(int index);
LiveRange* LiveRangeFor(int index);
HPhi* LookupPhi(LOperand* operand) const;
LGap* GetLastGap(HBasicBlock* block);
const char* RegisterName(int allocation_index);
inline bool IsGapAt(int index);
inline LInstruction* InstructionAt(int index);
inline LGap* GapAt(int index);
Zone zone_;
LPlatformChunk* chunk_;
// During liveness analysis keep a mapping from block id to live_in sets
// for blocks already analyzed.
ZoneList<BitVector*> live_in_sets_;
// Liveness analysis results.
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
EmbeddedVector<LiveRange*, Register::kNumRegisters> fixed_live_ranges_;
EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
ZoneList<LiveRange*> inactive_live_ranges_;
ZoneList<LiveRange*> reusable_slots_;
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
int first_artificial_register_;
GrowableBitVector double_artificial_registers_;
RegisterKind mode_;
int num_registers_;
const int* allocatable_register_codes_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
HGraph* graph_;
bool has_osr_entry_;
// Indicates success or failure during register allocation.
bool allocation_ok_;
#ifdef DEBUG
LifetimePosition allocation_finger_;
#endif
DISALLOW_COPY_AND_ASSIGN(LAllocator);
};
class LAllocatorPhase : public CompilationPhase {
public:
LAllocatorPhase(const char* name, LAllocator* allocator);
~LAllocatorPhase();
private:
LAllocator* allocator_;
size_t allocator_zone_start_allocation_size_;
DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
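
LAllocator above is a linear-scan allocator: unhandled live ranges are processed in start order, each one either takes a free register (TryAllocateFreeReg) or forces an eviction or split (AllocateBlockedReg), and active/inactive lists track what currently occupies registers. A heavily simplified sketch of that core loop, using whole intervals instead of splittable ranges and spilling instead of splitting; it assumes at least one register and ignores fixed ranges and use positions:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <set>
#include <vector>

struct Interval {
  int start, end;  // half-open [start, end)
  int reg = -1;    // assigned register, or -1 if spilled
};

// Keep active intervals ordered by end position; grab a free register when
// one exists, otherwise evict the interval whose end is furthest away.
void LinearScan(std::vector<Interval>& intervals, int num_regs) {
  assert(num_regs > 0);
  std::sort(intervals.begin(), intervals.end(),
            [](const Interval& a, const Interval& b) { return a.start < b.start; });
  std::set<int> free_regs;
  for (int r = 0; r < num_regs; ++r) free_regs.insert(r);
  auto by_end = [](Interval* a, Interval* b) {
    return a->end != b->end ? a->end < b->end : a < b;
  };
  std::set<Interval*, decltype(by_end)> active(by_end);

  for (Interval& cur : intervals) {
    // Expire active intervals that end at or before the current start.
    while (!active.empty() && (*active.begin())->end <= cur.start) {
      free_regs.insert((*active.begin())->reg);
      active.erase(active.begin());
    }
    if (!free_regs.empty()) {
      cur.reg = *free_regs.begin();  // TryAllocateFreeReg analogue.
      free_regs.erase(free_regs.begin());
      active.insert(&cur);
    } else {
      Interval* furthest = *active.rbegin();
      if (furthest->end > cur.end) {  // AllocateBlockedReg analogue:
        cur.reg = furthest->reg;      // take the register that frees up last
        furthest->reg = -1;           // and spill its former owner.
        active.erase(std::prev(active.end()));
        active.insert(&cur);
      } else {
        cur.reg = -1;                 // Spill the current interval instead.
      }
    }
  }
}

int main() {
  std::vector<Interval> v = {{0, 10}, {2, 4}, {3, 12}};
  LinearScan(v, 2);
  assert(v[0].reg != -1 && v[1].reg != -1);
  assert(v[2].reg == -1);  // ends last, so it is the one left spilled
  return 0;
}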

View File

@ -1,400 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/lithium-codegen.h"
#include <sstream>
#include "src/objects-inl.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
#include "src/globals.h"
namespace v8 {
namespace internal {
HGraph* LCodeGenBase::graph() const {
return chunk()->graph();
}
LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
CompilationInfo* info)
: chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
zone_(info->zone()),
status_(UNUSED),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
translations_(info->zone()),
inlined_function_count_(0),
last_lazy_deopt_pc_(0),
osr_pc_offset_(-1),
source_position_table_builder_(info->zone(),
info->SourcePositionRecordingMode()) {}
Isolate* LCodeGenBase::isolate() const { return info_->isolate(); }
bool LCodeGenBase::GenerateBody() {
DCHECK(is_generating());
bool emit_instructions = true;
LCodeGen* codegen = static_cast<LCodeGen*>(this);
for (current_instruction_ = 0;
!is_aborted() && current_instruction_ < instructions_->length();
current_instruction_++) {
LInstruction* instr = instructions_->at(current_instruction_);
// Don't emit code for basic blocks with a replacement.
if (instr->IsLabel()) {
emit_instructions = !LLabel::cast(instr)->HasReplacement() &&
(!FLAG_unreachable_code_elimination ||
instr->hydrogen_value()->block()->IsReachable());
if (FLAG_code_comments && !emit_instructions) {
Comment(
";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) "
"--------------------",
current_instruction_,
instr->hydrogen_value()->id(),
instr->hydrogen_value()->block()->block_id());
}
}
if (!emit_instructions) continue;
if (FLAG_code_comments && instr->HasInterestingComment(codegen)) {
Comment(";;; <@%d,#%d> %s",
current_instruction_,
instr->hydrogen_value()->id(),
instr->Mnemonic());
}
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
if (value->position().IsKnown()) {
RecordAndWritePosition(value->position());
}
instr->CompileToNative(codegen);
GenerateBodyInstructionPost(instr);
}
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
return !is_aborted();
}
void LCodeGenBase::CheckEnvironmentUsage() {
#ifdef DEBUG
bool dead_block = false;
for (int i = 0; i < instructions_->length(); i++) {
LInstruction* instr = instructions_->at(i);
HValue* hval = instr->hydrogen_value();
if (instr->IsLabel()) dead_block = LLabel::cast(instr)->HasReplacement();
if (dead_block || !hval->block()->IsReachable()) continue;
HInstruction* hinstr = HInstruction::cast(hval);
if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) {
V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)",
hinstr->Mnemonic(), instr->Mnemonic());
}
if (instr->HasEnvironment() && !instr->environment()->has_been_used()) {
V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)",
hinstr->Mnemonic(), instr->Mnemonic());
}
}
#endif
}
void LCodeGenBase::RecordAndWritePosition(SourcePosition pos) {
if (!pos.IsKnown()) return;
source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
}
void LCodeGenBase::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
char buffer[4 * KB];
StringBuilder builder(buffer, arraysize(buffer));
va_list arguments;
va_start(arguments, format);
builder.AddFormattedList(format, arguments);
va_end(arguments);
// Copy the string before recording it in the assembler to avoid
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
SourcePosition position = deopt_info.position;
int deopt_id = deopt_info.deopt_id;
if (masm()->isolate()->NeedsSourcePositionsForProfiling()) {
masm()->RecordDeoptReason(deopt_info.deopt_reason, position, deopt_id);
}
}
int LCodeGenBase::GetNextEmittedBlock() const {
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
if (!graph()->blocks()->at(i)->IsReachable()) continue;
if (!chunk_->GetLabel(i)->HasReplacement()) return i;
}
return -1;
}
void LCodeGenBase::Abort(BailoutReason reason) {
info()->AbortOptimization(reason);
status_ = ABORTED;
}
void LCodeGenBase::Retry(BailoutReason reason) {
info()->RetryOptimization(reason);
status_ = ABORTED;
}
void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) {
if (map->is_deprecated()) return Retry(kMapBecameDeprecated);
chunk_->AddDeprecationDependency(map);
}
void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
if (!map->is_stable()) return Retry(kMapBecameUnstable);
chunk_->AddStabilityDependency(map);
}
int LCodeGenBase::DefineDeoptimizationLiteral(Handle<Object> literal) {
int result = deoptimization_literals_.length();
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal, zone());
return result;
}
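
DefineDeoptimizationLiteral above is a find-or-append: an identical literal reuses its existing index, anything new is appended at the end. The same interning pattern on plain strings (a sketch with illustrative names):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Return the index of value in pool, appending it first if absent.
int Intern(std::vector<std::string>& pool, const std::string& value) {
  for (size_t i = 0; i < pool.size(); ++i) {
    if (pool[i] == value) return static_cast<int>(i);
  }
  pool.push_back(value);
  return static_cast<int>(pool.size()) - 1;
}

int main() {
  std::vector<std::string> pool;
  assert(Intern(pool, "outer") == 0);
  assert(Intern(pool, "inlined") == 1);
  assert(Intern(pool, "outer") == 0);  // deduplicated
  return 0;
}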
void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
Translation* translation) {
int translation_size = environment->translation_size();
#ifdef DEBUG
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
#endif // DEBUG
switch (environment->frame_type()) {
case JS_FUNCTION: {
UNREACHABLE();
break;
}
case JS_CONSTRUCT: {
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
translation->BeginConstructStubFrame(BailoutId::ConstructStubInvoke(),
shared_id, translation_size);
if (info()->closure().is_identical_to(environment->closure())) {
translation->StoreJSFrameFunction();
} else {
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->StoreLiteral(closure_id);
}
break;
}
case JS_GETTER: {
DCHECK_EQ(1, translation_size);
DCHECK_EQ(0, height);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
translation->BeginGetterStubFrame(shared_id);
if (info()->closure().is_identical_to(environment->closure())) {
translation->StoreJSFrameFunction();
} else {
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->StoreLiteral(closure_id);
}
break;
}
case JS_SETTER: {
DCHECK_EQ(2, translation_size);
DCHECK_EQ(0, height);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
translation->BeginSetterStubFrame(shared_id);
if (info()->closure().is_identical_to(environment->closure())) {
translation->StoreJSFrameFunction();
} else {
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->StoreLiteral(closure_id);
}
break;
}
case TAIL_CALLER_FUNCTION: {
DCHECK_EQ(0, translation_size);
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
translation->BeginTailCallerFrame(shared_id);
if (info()->closure().is_identical_to(environment->closure())) {
translation->StoreJSFrameFunction();
} else {
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->StoreLiteral(closure_id);
}
break;
}
case ARGUMENTS_ADAPTOR: {
int shared_id = DefineDeoptimizationLiteral(
environment->entry() ? environment->entry()->shared()
: info()->shared_info());
translation->BeginArgumentsAdaptorFrame(shared_id, translation_size);
if (info()->closure().is_identical_to(environment->closure())) {
translation->StoreJSFrameFunction();
} else {
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->StoreLiteral(closure_id);
}
break;
}
case STUB:
translation->BeginCompiledStubFrame(translation_size);
break;
}
}
namespace {
Handle<PodArray<InliningPosition>> CreateInliningPositions(
CompilationInfo* info) {
const CompilationInfo::InlinedFunctionList& inlined_functions =
info->inlined_functions();
if (inlined_functions.size() == 0) {
return Handle<PodArray<InliningPosition>>::cast(
info->isolate()->factory()->empty_byte_array());
}
Handle<PodArray<InliningPosition>> inl_positions =
PodArray<InliningPosition>::New(
info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
for (size_t i = 0; i < inlined_functions.size(); ++i) {
inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
}
return inl_positions;
}
} // namespace
void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
if (info_->IsOptimizing()) {
// Reference to shared function info does not change between phases.
AllowDeferredHandleDereference allow_handle_dereference;
data->SetSharedFunctionInfo(*info_->shared_info());
} else {
data->SetSharedFunctionInfo(Smi::kZero);
}
data->SetWeakCellCache(Smi::kZero);
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
{
AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
data->SetLiteralArray(*literals);
}
Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info_);
data->SetInliningPositions(*inl_pos);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK_EQ(0, deoptimization_literals_.length());
for (CompilationInfo::InlinedFunctionHolder& inlined :
info()->inlined_functions()) {
if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
int index = DefineDeoptimizationLiteral(inlined.shared_info);
inlined.RegisterInlinedFunctionId(index);
}
}
inlined_function_count_ = deoptimization_literals_.length();
}
Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
LInstruction* instr, DeoptimizeReason deopt_reason, int deopt_id) {
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
deopt_reason, deopt_id);
return deopt_info;
}
} // namespace internal
} // namespace v8
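
PopulateDeoptimizationData above lays the recorded environments out as parallel arrays indexed by deopt entry (ast id, translation index, arguments height, pc offset). A sketch of that struct-of-arrays layout with plain vectors; the type and field names are illustrative, not the DeoptimizationInputData API:

#include <vector>

struct Environment {
  int ast_id;
  int translation_index;
  int arguments_stack_height;
  int pc_offset;
};

// Parallel arrays, one slot per deoptimization entry.
struct DeoptTable {
  std::vector<int> ast_ids;
  std::vector<int> translation_indexes;
  std::vector<int> arguments_heights;
  std::vector<int> pcs;
};

DeoptTable Populate(const std::vector<Environment>& deopts) {
  DeoptTable table;
  for (const Environment& env : deopts) {
    table.ast_ids.push_back(env.ast_id);
    table.translation_indexes.push_back(env.translation_index);
    table.arguments_heights.push_back(env.arguments_stack_height);
    table.pcs.push_back(env.pc_offset);
  }
  return table;
}

int main() { return 0; }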

View File

@ -1,110 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
#include "src/bailout-reason.h"
#include "src/deoptimizer.h"
#include "src/source-position-table.h"
namespace v8 {
namespace internal {
class CompilationInfo;
class HGraph;
class LChunk;
class LEnvironment;
class LInstruction;
class LPlatformChunk;
class LCodeGenBase BASE_EMBEDDED {
public:
LCodeGenBase(LChunk* chunk,
MacroAssembler* assembler,
CompilationInfo* info);
virtual ~LCodeGenBase() {}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const;
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const;
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
}
void PRINTF_FORMAT(2, 3) Comment(const char* format, ...);
void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
static Deoptimizer::DeoptInfo MakeDeoptInfo(LInstruction* instr,
DeoptimizeReason deopt_reason,
int deopt_id);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
void RecordAndWritePosition(SourcePosition position);
int GetNextEmittedBlock() const;
void WriteTranslationFrame(LEnvironment* environment,
Translation* translation);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
// Check that an environment assigned via AssignEnvironment is actually being
// used. Redundant assignments keep things alive longer than necessary, and
// consequently lead to worse code, so it's important to minimize this.
void CheckEnvironmentUsage();
protected:
enum Status {
UNUSED,
GENERATING,
DONE,
ABORTED
};
LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
Zone* zone_;
Status status_;
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Handle<Object> > deoptimization_literals_;
TranslationBuffer translations_;
int inlined_function_count_;
int last_lazy_deopt_pc_;
int osr_pc_offset_;
SourcePositionTableBuilder source_position_table_builder_;
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
void Abort(BailoutReason reason);
void Retry(BailoutReason reason);
// Methods for code dependencies.
void AddDeprecationDependency(Handle<Map> map);
void AddStabilityDependency(Handle<Map> map);
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_LITHIUM_CODEGEN_H_

View File

@ -1,116 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_LITHIUM_INL_H_
#define V8_CRANKSHAFT_LITHIUM_INL_H_
#include "src/crankshaft/lithium.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#else
#error "Unknown architecture."
#endif
namespace v8 {
namespace internal {
TempIterator::TempIterator(LInstruction* instr)
: instr_(instr), limit_(instr->TempCount()), current_(0) {
SkipUninteresting();
}
bool TempIterator::Done() { return current_ >= limit_; }
LOperand* TempIterator::Current() {
DCHECK(!Done());
return instr_->TempAt(current_);
}
void TempIterator::SkipUninteresting() {
while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
}
void TempIterator::Advance() {
++current_;
SkipUninteresting();
}
InputIterator::InputIterator(LInstruction* instr)
: instr_(instr), limit_(instr->InputCount()), current_(0) {
SkipUninteresting();
}
bool InputIterator::Done() { return current_ >= limit_; }
LOperand* InputIterator::Current() {
DCHECK(!Done());
DCHECK(instr_->InputAt(current_) != NULL);
return instr_->InputAt(current_);
}
void InputIterator::Advance() {
++current_;
SkipUninteresting();
}
void InputIterator::SkipUninteresting() {
while (current_ < limit_) {
LOperand* current = instr_->InputAt(current_);
if (current != NULL && !current->IsConstantOperand()) break;
++current_;
}
}
UseIterator::UseIterator(LInstruction* instr)
: input_iterator_(instr), env_iterator_(instr->environment()) {}
bool UseIterator::Done() {
return input_iterator_.Done() && env_iterator_.Done();
}
LOperand* UseIterator::Current() {
DCHECK(!Done());
LOperand* result = input_iterator_.Done() ? env_iterator_.Current()
: input_iterator_.Current();
DCHECK(result != NULL);
return result;
}
void UseIterator::Advance() {
input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
}
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_LITHIUM_INL_H_
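
The iterators above hide "uninteresting" slots: TempIterator skips NULL temps and InputIterator additionally skips constants, so SkipUninteresting runs both at construction and after every Advance. A generic sketch of that skip-on-advance pattern (the NonNullIterator name is hypothetical, not V8 code):

#include <cassert>
#include <cstddef>
#include <vector>

// Iterates the non-null elements of a vector of pointers, skipping nulls
// both on construction and after every Advance.
class NonNullIterator {
 public:
  explicit NonNullIterator(const std::vector<int*>& v) : v_(v) { Skip(); }
  bool Done() const { return current_ >= v_.size(); }
  int* Current() const { assert(!Done()); return v_[current_]; }
  void Advance() { ++current_; Skip(); }

 private:
  void Skip() {
    while (current_ < v_.size() && v_[current_] == nullptr) ++current_;
  }
  const std::vector<int*>& v_;
  size_t current_ = 0;
};

int main() {
  int a = 1, b = 2;
  std::vector<int*> slots = {nullptr, &a, nullptr, nullptr, &b};
  int sum = 0;
  for (NonNullIterator it(slots); !it.Done(); it.Advance()) sum += *it.Current();
  assert(sum == 3);
  return 0;
}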

View File

@ -1,730 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/lithium.h"
#include "src/ast/scopes.h"
#include "src/codegen.h"
#include "src/objects-inl.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
#include "src/crankshaft/x64/lithium-codegen-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
#include "src/crankshaft/arm/lithium-codegen-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/crankshaft/ppc/lithium-ppc.h" // NOLINT
#include "src/crankshaft/ppc/lithium-codegen-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
#include "src/crankshaft/mips/lithium-codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
#include "src/crankshaft/arm64/lithium-codegen-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
#include "src/crankshaft/mips64/lithium-codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
#include "src/crankshaft/x87/lithium-codegen-x87.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/crankshaft/s390/lithium-s390.h" // NOLINT
#include "src/crankshaft/s390/lithium-codegen-s390.h" // NOLINT
#else
#error "Unknown architecture."
#endif
namespace v8 {
namespace internal {
const auto GetRegConfig = RegisterConfiguration::Crankshaft;
void LOperand::PrintTo(StringStream* stream) {
LUnallocated* unalloc = NULL;
switch (kind()) {
case INVALID:
stream->Add("(0)");
break;
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
stream->Add("v%d", unalloc->virtual_register());
if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
stream->Add("(=%dS)", unalloc->fixed_slot_index());
break;
}
switch (unalloc->extended_policy()) {
case LUnallocated::NONE:
break;
case LUnallocated::FIXED_REGISTER: {
int reg_index = unalloc->fixed_register_index();
if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d)", reg_index);
} else {
const char* register_name =
GetRegConfig()->GetGeneralRegisterName(reg_index);
stream->Add("(=%s)", register_name);
}
break;
}
case LUnallocated::FIXED_DOUBLE_REGISTER: {
int reg_index = unalloc->fixed_register_index();
if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d)", reg_index);
} else {
const char* double_register_name =
GetRegConfig()->GetDoubleRegisterName(reg_index);
stream->Add("(=%s)", double_register_name);
}
break;
}
case LUnallocated::MUST_HAVE_REGISTER:
stream->Add("(R)");
break;
case LUnallocated::MUST_HAVE_DOUBLE_REGISTER:
stream->Add("(D)");
break;
case LUnallocated::WRITABLE_REGISTER:
stream->Add("(WR)");
break;
case LUnallocated::SAME_AS_FIRST_INPUT:
stream->Add("(1)");
break;
case LUnallocated::ANY:
stream->Add("(-)");
break;
}
break;
case CONSTANT_OPERAND:
stream->Add("[constant:%d]", index());
break;
case STACK_SLOT:
stream->Add("[stack:%d]", index());
break;
case DOUBLE_STACK_SLOT:
stream->Add("[double_stack:%d]", index());
break;
case REGISTER: {
int reg_index = index();
if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d|R)", reg_index);
} else {
stream->Add("[%s|R]",
GetRegConfig()->GetGeneralRegisterName(reg_index));
}
break;
}
case DOUBLE_REGISTER: {
int reg_index = index();
if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d|R)", reg_index);
} else {
stream->Add("[%s|R]", GetRegConfig()->GetDoubleRegisterName(reg_index));
}
break;
}
}
}
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
LSubKindOperand<kOperandKind, kNumCachedOperands>*
LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
if (cache) return;
cache = new LSubKindOperand[kNumCachedOperands];
for (int i = 0; i < kNumCachedOperands; i++) {
cache[i].ConvertTo(kOperandKind, i);
}
}
template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
delete[] cache;
cache = NULL;
}
void LOperand::SetUpCaches() {
#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
}
void LOperand::TearDownCaches() {
#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}
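
SetUpCache above pre-builds one canonical operand object per small index, so operands of the same kind and index can be shared by pointer and need no per-use allocation. A sketch of that flyweight cache for a single hypothetical kind (StackSlot and StackSlotCache are illustrative names):

#include <cassert>

struct StackSlot {
  int index = 0;
};

class StackSlotCache {
 public:
  static constexpr int kNumCached = 128;

  // Shared instance for a small index; the real code falls back to zone
  // allocation for indices beyond the cache.
  static const StackSlot* Get(int index) {
    assert(index >= 0 && index < kNumCached);
    static const StackSlotCache cache;  // built once, on first use
    return &cache.slots_[index];
  }

 private:
  StackSlotCache() {
    for (int i = 0; i < kNumCached; ++i) slots_[i].index = i;
  }
  StackSlot slots_[kNumCached];
};

int main() {
  const StackSlot* a = StackSlotCache::Get(3);
  const StackSlot* b = StackSlotCache::Get(3);
  assert(a == b && a->index == 3);  // one canonical instance per index
  return 0;
}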
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
}
return true;
}
void LParallelMove::PrintDataTo(StringStream* stream) const {
bool first = true;
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsEliminated()) {
LOperand* source = move_operands_[i].source();
LOperand* destination = move_operands_[i].destination();
if (!first) stream->Add(" ");
first = false;
if (source->Equals(destination)) {
destination->PrintTo(stream);
} else {
destination->PrintTo(stream);
stream->Add(" = ");
source->PrintTo(stream);
}
stream->Add(";");
}
}
}
void LEnvironment::PrintTo(StringStream* stream) {
stream->Add("[id=%d|", ast_id().ToInt());
if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
stream->Add("deopt_id=%d|", deoptimization_index());
}
stream->Add("parameters=%d|", parameter_count());
stream->Add("arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
if (i != 0) stream->Add(";");
if (values_[i] == NULL) {
stream->Add("[hole]");
} else {
values_[i]->PrintTo(stream);
}
}
stream->Add("]");
}
void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
pointer_operands_.Add(op, zone);
}
void LPointerMap::RemovePointer(LOperand* op) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
for (int i = 0; i < pointer_operands_.length(); ++i) {
if (pointer_operands_[i]->Equals(op)) {
pointer_operands_.Remove(i);
--i;
}
}
}
void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
untagged_operands_.Add(op, zone);
}
void LPointerMap::PrintTo(StringStream* stream) {
stream->Add("{");
for (int i = 0; i < pointer_operands_.length(); ++i) {
if (i != 0) stream->Add(";");
pointer_operands_[i]->PrintTo(stream);
}
stream->Add("}");
}
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: base_frame_slots_(info->IsStub()
? TypedFrameConstants::kFixedSlotCount
: StandardFrameConstants::kFixedSlotCount),
current_frame_slots_(base_frame_slots_),
info_(info),
graph_(graph),
instructions_(32, info->zone()),
pointer_maps_(8, info->zone()),
deprecation_dependencies_(32, info->zone()),
stability_dependencies_(8, info->zone()) {}
LLabel* LChunk::GetLabel(int block_id) const {
HBasicBlock* block = graph_->blocks()->at(block_id);
int first_instruction = block->first_instruction_index();
return LLabel::cast(instructions_[first_instruction]);
}
int LChunk::LookupDestination(int block_id) const {
LLabel* cur = GetLabel(block_id);
while (cur->replacement() != NULL) {
cur = cur->replacement();
}
return cur->block_id();
}
Label* LChunk::GetAssemblyLabel(int block_id) const {
LLabel* label = GetLabel(block_id);
DCHECK(!label->HasReplacement());
return label->label();
}
void LChunk::MarkEmptyBlocks() {
LPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
int last = block->last_instruction_index();
LInstruction* first_instr = instructions()->at(first);
LInstruction* last_instr = instructions()->at(last);
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
LInstruction* cur = instructions()->at(i);
if (cur->IsGap()) {
LGap* gap = LGap::cast(cur);
if (!gap->IsRedundant()) {
can_eliminate = false;
}
} else {
can_eliminate = false;
}
}
if (can_eliminate) {
label->set_replacement(GetLabel(goto_instr->block_id()));
}
}
}
}
}
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new (zone()) LInstructionGap(block);
gap->set_hydrogen_value(instr->hydrogen_value());
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap, zone());
index = instructions_.length();
instructions_.Add(instr, zone());
} else {
index = instructions_.length();
instructions_.Add(instr, zone());
instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
return LConstantOperand::Create(constant->id(), zone());
}
int LChunk::GetParameterStackSlot(int index) const {
// The receiver is at index 0, the first parameter at index 1, so we
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
int result = index - info()->num_parameters() - 1;
DCHECK(result < 0);
return result;
}
// A parameter relative to the frame pointer (ebp on ia32) in the arguments stub.
int LChunk::ParameterAt(int index) {
DCHECK(-1 <= index); // -1 is the receiver.
return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
LGap* LChunk::GetGapAt(int index) const {
return LGap::cast(instructions_[index]);
}
bool LChunk::IsGapAt(int index) const {
return instructions_[index]->IsGap();
}
int LChunk::NearestGapPos(int index) const {
while (!IsGapAt(index)) index--;
return index;
}
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(
LGap::START, zone())->AddMove(from, to, zone());
}
HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
return HConstant::cast(graph_->LookupValue(operand->index()));
}
Representation LChunk::LookupLiteralRepresentation(
LConstantOperand* operand) const {
return graph_->LookupValue(operand->index())->representation();
}
void LChunk::CommitDependencies(Handle<Code> code) const {
if (!code->is_optimized_code()) return;
HandleScope scope(isolate());
for (Handle<Map> map : deprecation_dependencies_) {
DCHECK(!map->is_deprecated());
DCHECK(map->CanBeDeprecated());
Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
}
for (Handle<Map> map : stability_dependencies_) {
DCHECK(map->is_stable());
DCHECK(map->CanTransition());
Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
}
info_->dependencies()->Commit(code);
}
LChunk* LChunk::NewChunk(HGraph* graph) {
DisallowHandleAllocation no_handles;
DisallowHeapAllocation no_gc;
graph->DisallowAddingNewValues();
int values = graph->GetMaximumValueID();
CompilationInfo* info = graph->info();
if (values > LUnallocated::kMaxVirtualRegisters) {
info->AbortOptimization(kNotEnoughVirtualRegistersForValues);
return NULL;
}
LAllocator allocator(values, graph);
LChunkBuilder builder(info, graph, &allocator);
LChunk* chunk = builder.Build();
if (chunk == NULL) return NULL;
if (!allocator.Allocate(chunk)) {
info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return NULL;
}
chunk->set_allocated_double_registers(
allocator.assigned_double_registers());
return chunk;
}
Handle<Code> LChunk::Codegen() {
MacroAssembler assembler(info()->isolate(), NULL, 0,
CodeObjectRequired::kYes);
// Code serializer only takes unoptimized code.
DCHECK(!info()->will_serialize());
LCodeGen generator(this, &assembler, info());
MarkEmptyBlocks();
if (generator.GenerateCode()) {
generator.CheckEnvironmentUsage();
CodeGenerator::MakeCodePrologue(info(), "optimized");
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(
&assembler, nullptr, info(), assembler.CodeObject());
generator.FinishCode(code);
CommitDependencies(code);
Handle<ByteArray> source_positions =
generator.source_position_table_builder()->ToSourcePositionTable(
info()->isolate(), Handle<AbstractCode>::cast(code));
code->set_source_position_table(*source_positions);
code->set_is_crankshafted(true);
CodeGenerator::PrintCode(code, info());
return code;
}
assembler.AbortedCodeGeneration();
return Handle<Code>::null();
}
void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
allocated_double_registers_ = allocated_registers;
BitVector* doubles = allocated_double_registers();
BitVector::Iterator iterator(doubles);
while (!iterator.Done()) {
if (info()->saves_caller_doubles()) {
if (kDoubleSize == kPointerSize * 2) {
current_frame_slots_ += 2;
} else {
current_frame_slots_++;
}
}
iterator.Advance();
}
}
void LChunkBuilderBase::Abort(BailoutReason reason) {
info()->AbortOptimization(reason);
status_ = ABORTED;
}
void LChunkBuilderBase::Retry(BailoutReason reason) {
info()->RetryOptimization(reason);
status_ = ABORTED;
}
void LChunkBuilderBase::CreateLazyBailoutForCall(HBasicBlock* current_block,
LInstruction* instr,
HInstruction* hydrogen_val) {
if (!instr->IsCall()) return;
HEnvironment* hydrogen_env = current_block->last_environment();
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
DCHECK_NOT_NULL(hydrogen_env);
if (instr->IsSyntacticTailCall()) {
// If it was a syntactic tail call we need to drop the current frame and
// all the frames on top of it that are either an arguments adaptor frame
// or a tail caller frame.
hydrogen_env = hydrogen_env->outer();
while (hydrogen_env != nullptr &&
(hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR ||
hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION)) {
hydrogen_env = hydrogen_env->outer();
}
if (hydrogen_env != nullptr) {
if (hydrogen_env->frame_type() == JS_FUNCTION) {
// In case an outer frame is a function frame we have to replay the
// environment manually because
// 1) it does not contain the result of the inlined function yet,
// 2) we can't find the proper simulate that corresponds to the point
//    after the inlined call to do a ReplayEnvironment() on.
// So we push the return value on top of the outer environment.
// As for JS_GETTER/JS_SETTER/JS_CONSTRUCT nothing has to be done here;
// the deoptimizer ensures that the result of the callee is correctly
// propagated to the result register during deoptimization.
hydrogen_env = hydrogen_env->Copy();
hydrogen_env->Push(hydrogen_val);
}
} else {
// Although we don't need this lazy bailout for normal execution
// (because when we tail call from the outermost function we should pop
// its frame) we still need it when the debugger is on.
hydrogen_env = current_block->last_environment();
}
} else {
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
sim->ReplayEnvironment(hydrogen_env);
hydrogen_value_for_lazy_bailout = sim;
}
}
LInstruction* bailout = LChunkBuilderBase::AssignEnvironment(
new (zone()) LLazyBailout(), hydrogen_env);
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block);
}
LInstruction* LChunkBuilderBase::AssignEnvironment(LInstruction* instr,
HEnvironment* hydrogen_env) {
int argument_index_accumulator = 0;
ZoneList<HValue*> objects_to_materialize(0, zone());
DCHECK_NE(TAIL_CALLER_FUNCTION, hydrogen_env->frame_type());
instr->set_environment(CreateEnvironment(
hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
return instr;
}
LEnvironment* LChunkBuilderBase::CreateEnvironment(
HEnvironment* hydrogen_env, int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize) {
if (hydrogen_env == NULL) return NULL;
BailoutId ast_id = hydrogen_env->ast_id();
DCHECK(!ast_id.IsNone() ||
(hydrogen_env->frame_type() != JS_FUNCTION &&
hydrogen_env->frame_type() != TAIL_CALLER_FUNCTION));
if (hydrogen_env->frame_type() == TAIL_CALLER_FUNCTION) {
// Skip potential outer arguments adaptor frame.
HEnvironment* outer_hydrogen_env = hydrogen_env->outer();
if (outer_hydrogen_env != nullptr &&
outer_hydrogen_env->frame_type() == ARGUMENTS_ADAPTOR) {
outer_hydrogen_env = outer_hydrogen_env->outer();
}
LEnvironment* outer = CreateEnvironment(
outer_hydrogen_env, argument_index_accumulator, objects_to_materialize);
return new (zone())
LEnvironment(hydrogen_env->closure(), hydrogen_env->frame_type(),
ast_id, 0, 0, 0, outer, hydrogen_env->entry(), zone());
}
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
objects_to_materialize);
int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
? 0
: hydrogen_env->specials_count();
int value_count = hydrogen_env->length() - omitted_count;
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
hydrogen_env->frame_type(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer,
hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
// Store the environment description into the environment
// (with holes for nested objects)
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i) &&
hydrogen_env->frame_type() != JS_FUNCTION) {
continue;
}
LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
op = LEnvironment::materialization_marker();
} else {
op = UseAny(value);
}
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
}
// Recursively store the nested objects into the environment
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
HValue* value = hydrogen_env->values()->at(i);
if (value->IsArgumentsObject() || value->IsCapturedObject()) {
AddObjectToMaterialize(value, objects_to_materialize, result);
}
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
return result;
}
// Add an object to the supplied environment and object materialization list.
//
// Notes:
//
// We are building three lists here:
//
// 1. In the result->object_mapping_ list (added to by the
// LEnvironment::Add*Object methods), we store the lengths (number
// of fields) of the captured objects in depth-first traversal order, or
// in case of duplicated objects, we store the index to the duplicate object
// (with a tag to differentiate between captured and duplicated objects).
//
// 2. The object fields are stored in the result->values_ list
// (added to by the LEnvironment.AddValue method) sequentially as lists
// of fields with holes for nested objects (the holes will be expanded
// later by LCodegen::AddToTranslation according to the
// LEnvironment.object_mapping_ list).
//
// 3. The auxiliary objects_to_materialize array stores the hydrogen values
// in the same order as result->object_mapping_ list. This is used
// to detect duplicate values and calculate the corresponding object index.
void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
int object_index = objects_to_materialize->length();
// Store the hydrogen value into the de-duplication array
objects_to_materialize->Add(value, zone());
// Find out whether we are storing a duplicated value
int previously_materialized_object = -1;
for (int prev = 0; prev < object_index; ++prev) {
if (objects_to_materialize->at(prev) == value) {
previously_materialized_object = prev;
break;
}
}
// Store the captured object length (or duplicated object index)
// into the environment. For duplicated objects, we stop here.
int length = value->OperandCount();
bool is_arguments = value->IsArgumentsObject();
if (previously_materialized_object >= 0) {
result->AddDuplicateObject(previously_materialized_object);
return;
} else {
result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
}
// Store the captured object's fields into the environment
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
LOperand* op;
HValue* arg_value = value->OperandAt(i);
if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
// Insert a hole for nested objects
op = LEnvironment::materialization_marker();
} else {
DCHECK(!arg_value->IsPushArguments());
// For ordinary values, tell the register allocator we need the value
// to be alive here
op = UseAny(arg_value);
}
result->AddValue(op,
arg_value->representation(),
arg_value->CheckFlag(HInstruction::kUint32));
}
// Recursively store all the nested captured objects into the environment
for (int i = is_arguments ? 1 : 0; i < length; ++i) {
HValue* arg_value = value->OperandAt(i);
if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
AddObjectToMaterialize(arg_value, objects_to_materialize, result);
}
}
}
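// Worked example (illustrative; the names A, B, x, y are placeholders):
// suppose the environment captures an object A with two fields {x, B}, where
// x is a plain value and B is itself a captured object with a single field
// {y}. AddObjectToMaterialize(A) then produces:
//   objects_to_materialize  = [A, B]
//   result->object_mapping_ = [ {length=2, is_arguments=false, dup=false},
//                               {length=1, is_arguments=false, dup=false} ]
//   result->values_ (tail)  = [ UseAny(x), <hole for B>, UseAny(y) ]
// If B were captured a second time further down, the de-duplication scan
// above would find it at index 1 and emit AddDuplicateObject(1) instead of
// re-emitting its fields.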
LPhase::~LPhase() {
if (ShouldProduceTraceOutput()) {
isolate()->GetHTracer()->TraceLithium(name(), chunk_);
}
}
} // namespace internal
} // namespace v8

src/crankshaft/lithium.h
View File

@@ -1,847 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_LITHIUM_H_
#define V8_CRANKSHAFT_LITHIUM_H_
#include <set>
#include "src/allocation.h"
#include "src/bailout-reason.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen.h"
#include "src/safepoint-table.h"
#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
#define LITHIUM_OPERAND_LIST(V) \
V(ConstantOperand, CONSTANT_OPERAND, 128) \
V(StackSlot, STACK_SLOT, 128) \
V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
V(Register, REGISTER, 16) \
V(DoubleRegister, DOUBLE_REGISTER, 16)
class LOperand : public ZoneObject {
public:
enum Kind {
INVALID,
UNALLOCATED,
CONSTANT_OPERAND,
STACK_SLOT,
DOUBLE_STACK_SLOT,
REGISTER,
DOUBLE_REGISTER
};
LOperand() : value_(KindField::encode(INVALID)) { }
Kind kind() const { return KindField::decode(value_); }
int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
bool Is##name() const { return kind() == type; }
LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef LITHIUM_OPERAND_PREDICATE
bool Equals(LOperand* other) const { return value_ == other->value_; }
void PrintTo(StringStream* stream);
void ConvertTo(Kind kind, int index) {
if (kind == REGISTER) DCHECK(index >= 0);
value_ = KindField::encode(kind);
value_ |= index << kKindFieldWidth;
DCHECK(this->index() == index);
}
// Calls SetUpCache()/TearDownCache() for each subclass.
static void SetUpCaches();
static void TearDownCaches();
protected:
static const int kKindFieldWidth = 3;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
LOperand(Kind kind, int index) { ConvertTo(kind, index); }
unsigned value_;
};
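// Example (illustrative): ConvertTo(STACK_SLOT, 5) yields
//   value_ = STACK_SLOT | (5 << kKindFieldWidth) = 3 | 40 = 43,
// from which kind() decodes STACK_SLOT and index() recovers 5. Because
// index() shifts a signed int, negative indices such as parameter slots
// round-trip as well: ConvertTo(STACK_SLOT, -3) gives index() == -3.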
class LUnallocated : public LOperand {
public:
enum BasicPolicy {
FIXED_SLOT,
EXTENDED_POLICY
};
enum ExtendedPolicy {
NONE,
ANY,
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
MUST_HAVE_REGISTER,
MUST_HAVE_DOUBLE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT
};
// Lifetime of operand inside the instruction.
enum Lifetime {
// A USED_AT_START operand is guaranteed to be live only at the start of
// the instruction. The register allocator is free to assign the same
// register to some other operand used inside the instruction (i.e. a
// temporary or an output).
USED_AT_START,
// A USED_AT_END operand is treated as live until the end of the
// instruction. This means that the register allocator will not reuse its
// register for any other operand inside the instruction.
USED_AT_END
};
explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
}
LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
DCHECK(policy == FIXED_SLOT);
value_ |= BasicPolicyField::encode(policy);
value_ |= index << FixedSlotIndexField::kShift;
DCHECK(this->fixed_slot_index() == index);
}
LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
value_ |= FixedRegisterField::encode(index);
}
LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
: LOperand(UNALLOCATED, 0) {
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(lifetime);
}
LUnallocated* CopyUnconstrained(Zone* zone) {
LUnallocated* result = new(zone) LUnallocated(ANY);
result->set_virtual_register(virtual_register());
return result;
}
static LUnallocated* cast(LOperand* op) {
DCHECK(op->IsUnallocated());
return reinterpret_cast<LUnallocated*>(op);
}
// The encoding used for LUnallocated operands depends on the policy that is
// stored within the operand. The FIXED_SLOT policy uses a compact encoding
// because it accommodates a larger payload.
//
// For FIXED_SLOT policy:
// +------------------------------------------+
// | slot_index | vreg | 0 | 001 |
// +------------------------------------------+
//
// For all other (extended) policies:
// +------------------------------------------+
// | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
// +------------------------------------------+ P ... Policy
//
// The slot index is a signed value which requires us to decode it manually
// instead of using the BitField utility class.
// The superclass has a KindField.
STATIC_ASSERT(kKindFieldWidth == 3);
// BitFields for all unallocated operands.
class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
// BitFields specific to BasicPolicy::FIXED_SLOT.
class FixedSlotIndexField : public BitField<int, 22, 10> {};
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
class LifetimeField : public BitField<Lifetime, 25, 1> {};
class FixedRegisterField : public BitField<int, 26, 6> {};
static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == ANY;
}
bool HasFixedPolicy() const {
return basic_policy() == FIXED_SLOT ||
extended_policy() == FIXED_REGISTER ||
extended_policy() == FIXED_DOUBLE_REGISTER;
}
bool HasRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY && (
extended_policy() == WRITABLE_REGISTER ||
extended_policy() == MUST_HAVE_REGISTER);
}
bool HasDoubleRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == MUST_HAVE_DOUBLE_REGISTER;
}
bool HasSameAsInputPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == SAME_AS_FIRST_INPUT;
}
bool HasFixedSlotPolicy() const {
return basic_policy() == FIXED_SLOT;
}
bool HasFixedRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_REGISTER;
}
bool HasFixedDoubleRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_DOUBLE_REGISTER;
}
bool HasWritableRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == WRITABLE_REGISTER;
}
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
BasicPolicy basic_policy() const {
return BasicPolicyField::decode(value_);
}
// [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
ExtendedPolicy extended_policy() const {
DCHECK(basic_policy() == EXTENDED_POLICY);
return ExtendedPolicyField::decode(value_);
}
// [fixed_slot_index]: Only for FIXED_SLOT.
int fixed_slot_index() const {
DCHECK(HasFixedSlotPolicy());
return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
}
// [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
int fixed_register_index() const {
DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
return FixedRegisterField::decode(value_);
}
// [virtual_register]: The virtual register ID for this operand.
int virtual_register() const {
return VirtualRegisterField::decode(value_);
}
void set_virtual_register(unsigned id) {
value_ = VirtualRegisterField::update(value_, id);
}
// [lifetime]: Only for non-FIXED_SLOT.
bool IsUsedAtStart() {
DCHECK(basic_policy() == EXTENDED_POLICY);
return LifetimeField::decode(value_) == USED_AT_START;
}
static bool TooManyParameters(int num_parameters) {
const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
return num_parameters + 1 > parameter_limit;
}
static bool TooManyParametersOrStackSlots(int num_parameters,
int num_stack_slots) {
const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
return num_parameters + 1 + num_stack_slots > locals_limit;
}
};
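// Example (illustrative): LUnallocated(LUnallocated::FIXED_SLOT, -2) packs
//   bits 0..2   kind       = UNALLOCATED (001)
//   bit  3      policy     = FIXED_SLOT
//   bits 22..31 slot index = -2 (decoded with a signed shift, as noted above)
// and set_virtual_register() later fills bits 4..21. By contrast,
// LUnallocated(LUnallocated::MUST_HAVE_REGISTER) selects EXTENDED_POLICY in
// bit 3 and stores the policy plus a USED_AT_END lifetime in bits 22..25.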
class LMoveOperands final BASE_EMBEDDED {
public:
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
}
LOperand* source() const { return source_; }
void set_source(LOperand* operand) { source_ = operand; }
LOperand* destination() const { return destination_; }
void set_destination(LOperand* operand) { destination_ = operand; }
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const {
return destination_ == NULL && source_ != NULL;
}
// True if this move blocks a move into the given destination operand.
bool Blocks(LOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
}
// A move is redundant if it's been eliminated, if its source and
// destination are the same, or if its destination is unneeded or constant.
bool IsRedundant() const {
return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
(destination_ != NULL && destination_->IsConstantOperand());
}
bool IsIgnored() const {
return destination_ != NULL && destination_->IsIgnored();
}
// We clear both operands to indicate a move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
bool IsEliminated() const {
DCHECK(source_ != NULL || destination_ == NULL);
return source_ == NULL;
}
private:
LOperand* source_;
LOperand* destination_;
};
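// Example (illustrative; r1 and r2 are placeholder registers): during gap
// resolution a move r1 -> r2 passes through
//   {source = r1,   destination = r2}    unresolved
//   {source = r1,   destination = NULL}  IsPending(): being processed
//   {source = NULL, destination = NULL}  IsEliminated(): already emitted
// (the destination is restored just before the move is emitted, after which
// Eliminate() clears both fields). A move whose source equals its destination,
// or whose destination is a constant operand, is IsRedundant(), and the gap
// resolvers skip it when building their initial move list.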
template <LOperand::Kind kOperandKind, int kNumCachedOperands>
class LSubKindOperand final : public LOperand {
public:
static LSubKindOperand* Create(int index, Zone* zone) {
DCHECK(index >= 0);
if (index < kNumCachedOperands) return &cache[index];
return new(zone) LSubKindOperand(index);
}
static LSubKindOperand* cast(LOperand* op) {
DCHECK(op->kind() == kOperandKind);
return reinterpret_cast<LSubKindOperand*>(op);
}
static void SetUpCache();
static void TearDownCache();
private:
static LSubKindOperand* cache;
LSubKindOperand() : LOperand() { }
explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
};
#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
typedef LSubKindOperand<LOperand::type, number> L##name;
LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
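// For reference, the macro above expands to one typedef per operand kind, e.g.
//   typedef LSubKindOperand<LOperand::REGISTER, 16>    LRegister;
//   typedef LSubKindOperand<LOperand::STACK_SLOT, 128> LStackSlot;
// so LRegister::Create(3, zone) returns the cached &cache[3] set up by
// LOperand::SetUpCaches(), while LStackSlot::Create(200, zone) exceeds the
// cache size and falls back to a fresh zone allocation.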
class LParallelMove final : public ZoneObject {
public:
explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
void AddMove(LOperand* from, LOperand* to, Zone* zone) {
move_operands_.Add(LMoveOperands(from, to), zone);
}
bool IsRedundant() const;
ZoneList<LMoveOperands>* move_operands() { return &move_operands_; }
void PrintDataTo(StringStream* stream) const;
private:
ZoneList<LMoveOperands> move_operands_;
};
class LPointerMap final : public ZoneObject {
public:
explicit LPointerMap(Zone* zone)
: pointer_operands_(8, zone),
untagged_operands_(0, zone),
lithium_position_(-1) { }
const ZoneList<LOperand*>* GetNormalizedOperands() {
for (int i = 0; i < untagged_operands_.length(); ++i) {
RemovePointer(untagged_operands_[i]);
}
untagged_operands_.Clear();
return &pointer_operands_;
}
int lithium_position() const { return lithium_position_; }
void set_lithium_position(int pos) {
DCHECK(lithium_position_ == -1);
lithium_position_ = pos;
}
void RecordPointer(LOperand* op, Zone* zone);
void RemovePointer(LOperand* op);
void RecordUntagged(LOperand* op, Zone* zone);
void PrintTo(StringStream* stream);
private:
ZoneList<LOperand*> pointer_operands_;
ZoneList<LOperand*> untagged_operands_;
int lithium_position_;
};
class LEnvironment final : public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
FrameType frame_type,
BailoutId ast_id,
int parameter_count,
int argument_count,
int value_count,
LEnvironment* outer,
HEnterInlined* entry,
Zone* zone)
: closure_(closure),
frame_type_(frame_type),
arguments_stack_height_(argument_count),
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
ast_id_(ast_id),
translation_size_(value_count),
parameter_count_(parameter_count),
pc_offset_(-1),
values_(value_count, zone),
is_tagged_(value_count, zone),
is_uint32_(value_count, zone),
object_mapping_(0, zone),
outer_(outer),
entry_(entry),
zone_(zone),
has_been_used_(false) { }
Handle<JSFunction> closure() const { return closure_; }
FrameType frame_type() const { return frame_type_; }
int arguments_stack_height() const { return arguments_stack_height_; }
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
BailoutId ast_id() const { return ast_id_; }
int translation_size() const { return translation_size_; }
int parameter_count() const { return parameter_count_; }
int pc_offset() const { return pc_offset_; }
const ZoneList<LOperand*>* values() const { return &values_; }
LEnvironment* outer() const { return outer_; }
HEnterInlined* entry() { return entry_; }
Zone* zone() const { return zone_; }
bool has_been_used() const { return has_been_used_; }
void set_has_been_used() { has_been_used_ = true; }
void AddValue(LOperand* operand,
Representation representation,
bool is_uint32) {
values_.Add(operand, zone());
if (representation.IsSmiOrTagged()) {
DCHECK(!is_uint32);
is_tagged_.Add(values_.length() - 1, zone());
}
if (is_uint32) {
is_uint32_.Add(values_.length() - 1, zone());
}
}
bool HasTaggedValueAt(int index) const {
return is_tagged_.Contains(index);
}
bool HasUint32ValueAt(int index) const {
return is_uint32_.Contains(index);
}
void AddNewObject(int length, bool is_arguments) {
uint32_t encoded = LengthOrDupeField::encode(length) |
IsArgumentsField::encode(is_arguments) |
IsDuplicateField::encode(false);
object_mapping_.Add(encoded, zone());
}
void AddDuplicateObject(int dupe_of) {
uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
IsDuplicateField::encode(true);
object_mapping_.Add(encoded, zone());
}
int ObjectDuplicateOfAt(int index) {
DCHECK(ObjectIsDuplicateAt(index));
return LengthOrDupeField::decode(object_mapping_[index]);
}
int ObjectLengthAt(int index) {
DCHECK(!ObjectIsDuplicateAt(index));
return LengthOrDupeField::decode(object_mapping_[index]);
}
bool ObjectIsArgumentsAt(int index) {
DCHECK(!ObjectIsDuplicateAt(index));
return IsArgumentsField::decode(object_mapping_[index]);
}
bool ObjectIsDuplicateAt(int index) {
return IsDuplicateField::decode(object_mapping_[index]);
}
void Register(int deoptimization_index,
int translation_index,
int pc_offset) {
DCHECK(!HasBeenRegistered());
deoptimization_index_ = deoptimization_index;
translation_index_ = translation_index;
pc_offset_ = pc_offset;
}
bool HasBeenRegistered() const {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
}
void PrintTo(StringStream* stream);
// Marker value indicating a de-materialized object.
static LOperand* materialization_marker() { return NULL; }
// Encoding used for the object_mapping map below.
class LengthOrDupeField : public BitField<int, 0, 30> { };
class IsArgumentsField : public BitField<bool, 30, 1> { };
class IsDuplicateField : public BitField<bool, 31, 1> { };
private:
Handle<JSFunction> closure_;
FrameType frame_type_;
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;
BailoutId ast_id_;
int translation_size_;
int parameter_count_;
int pc_offset_;
// Value array: [parameters] [locals] [expression stack] [de-materialized].
// |>--------- translation_size ---------<|
ZoneList<LOperand*> values_;
GrowableBitVector is_tagged_;
GrowableBitVector is_uint32_;
// Map with encoded information about materialization_marker operands.
ZoneList<uint32_t> object_mapping_;
LEnvironment* outer_;
HEnterInlined* entry_;
Zone* zone_;
bool has_been_used_;
};
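// Example (illustrative): AddNewObject(2, false) appends the raw word 2 to
// object_mapping_ (length in bits 0..29, is_arguments and is_duplicate both
// clear), whereas AddDuplicateObject(1) appends 1 | (1u << 31), which
// ObjectIsDuplicateAt() later recognizes and ObjectDuplicateOfAt() decodes
// back to index 1.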
// Iterates over the non-null, non-constant operands in an environment.
class ShallowIterator final BASE_EMBEDDED {
public:
explicit ShallowIterator(LEnvironment* env)
: env_(env),
limit_(env != NULL ? env->values()->length() : 0),
current_(0) {
SkipUninteresting();
}
bool Done() { return current_ >= limit_; }
LOperand* Current() {
DCHECK(!Done());
DCHECK(env_->values()->at(current_) != NULL);
return env_->values()->at(current_);
}
void Advance() {
DCHECK(!Done());
++current_;
SkipUninteresting();
}
LEnvironment* env() { return env_; }
private:
bool ShouldSkip(LOperand* op) {
return op == NULL || op->IsConstantOperand();
}
// Skip until something interesting, beginning with and including current_.
void SkipUninteresting() {
while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
++current_;
}
}
LEnvironment* env_;
int limit_;
int current_;
};
// Iterator for non-null, non-constant operands incl. outer environments.
class DeepIterator final BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
: current_iterator_(env) {
SkipUninteresting();
}
bool Done() { return current_iterator_.Done(); }
LOperand* Current() {
DCHECK(!current_iterator_.Done());
DCHECK(current_iterator_.Current() != NULL);
return current_iterator_.Current();
}
void Advance() {
current_iterator_.Advance();
SkipUninteresting();
}
private:
void SkipUninteresting() {
while (current_iterator_.env() != NULL && current_iterator_.Done()) {
current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
}
}
ShallowIterator current_iterator_;
};
class LPlatformChunk;
class LGap;
class LLabel;
// Superclass providing data and behavior common to all the
// arch-specific LPlatformChunk classes.
class LChunk : public ZoneObject {
public:
static LChunk* NewChunk(HGraph* graph);
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
HConstant* LookupConstant(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
bool HasAllocatedStackSlots() const {
return current_frame_slots_ != base_frame_slots_;
}
int GetSpillSlotCount() const {
return current_frame_slots_ - base_frame_slots_;
}
int GetTotalFrameSlotCount() const { return current_frame_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
LLabel* GetLabel(int block_id) const;
int LookupDestination(int block_id) const;
Label* GetAssemblyLabel(int block_id) const;
void AddDeprecationDependency(Handle<Map> map) {
DCHECK(!map->is_deprecated());
if (!map->CanBeDeprecated()) return;
DCHECK(!info_->IsStub());
deprecation_dependencies_.Add(map, zone());
}
void AddStabilityDependency(Handle<Map> map) {
DCHECK(map->is_stable());
if (!map->CanTransition()) return;
DCHECK(!info_->IsStub());
stability_dependencies_.Add(map, zone());
}
Zone* zone() const { return info_->zone(); }
Handle<Code> Codegen();
void set_allocated_double_registers(BitVector* allocated_registers);
BitVector* allocated_double_registers() {
return allocated_double_registers_;
}
protected:
LChunk(CompilationInfo* info, HGraph* graph);
int base_frame_slots_;
int current_frame_slots_;
private:
void CommitDependencies(Handle<Code> code) const;
CompilationInfo* info_;
HGraph* const graph_;
BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<Map>> deprecation_dependencies_;
ZoneList<Handle<Map>> stability_dependencies_;
};
class LChunkBuilderBase BASE_EMBEDDED {
public:
explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph)
: argument_count_(0),
chunk_(NULL),
info_(info),
graph_(graph),
status_(UNUSED),
zone_(graph->zone()) {}
virtual ~LChunkBuilderBase() { }
void Abort(BailoutReason reason);
void Retry(BailoutReason reason);
protected:
enum Status { UNUSED, BUILDING, DONE, ABORTED };
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
int argument_count() const { return argument_count_; }
Isolate* isolate() const { return graph_->isolate(); }
Heap* heap() const { return isolate()->heap(); }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
// An input operand in a register, a stack slot, or a constant operand.
// Will not be moved to a register even if one is freely available.
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
// Constructs the proper environment for a lazy bailout point after a call,
// creates an LLazyBailout instruction and adds it to the current block.
void CreateLazyBailoutForCall(HBasicBlock* current_block, LInstruction* instr,
HInstruction* hydrogen_val);
// Assigns the given environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr,
HEnvironment* hydrogen_env);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator,
ZoneList<HValue*>* objects_to_materialize);
void AddObjectToMaterialize(HValue* value,
ZoneList<HValue*>* objects_to_materialize,
LEnvironment* result);
Zone* zone() const { return zone_; }
int argument_count_;
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Status status_;
private:
Zone* zone_;
};
enum NumberUntagDMode {
NUMBER_CANDIDATE_IS_SMI,
NUMBER_CANDIDATE_IS_ANY_TAGGED
};
class LPhase : public CompilationPhase {
public:
LPhase(const char* name, LChunk* chunk)
: CompilationPhase(name, chunk->info()),
chunk_(chunk) { }
~LPhase();
private:
LChunk* chunk_;
DISALLOW_COPY_AND_ASSIGN(LPhase);
};
// A register-allocator view of a Lithium instruction. It contains the id of
// the output operand and a list of input operand uses.
enum RegisterKind {
UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
// Iterator for non-null temp operands.
class TempIterator BASE_EMBEDDED {
public:
inline explicit TempIterator(LInstruction* instr);
inline bool Done();
inline LOperand* Current();
inline void Advance();
private:
inline void SkipUninteresting();
LInstruction* instr_;
int limit_;
int current_;
};
// Iterator for non-constant input operands.
class InputIterator BASE_EMBEDDED {
public:
inline explicit InputIterator(LInstruction* instr);
inline bool Done();
inline LOperand* Current();
inline void Advance();
private:
inline void SkipUninteresting();
LInstruction* instr_;
int limit_;
int current_;
};
class UseIterator BASE_EMBEDDED {
public:
inline explicit UseIterator(LInstruction* instr);
inline bool Done();
inline LOperand* Current();
inline void Advance();
private:
InputIterator input_iterator_;
DeepIterator env_iterator_;
};
class LInstruction;
class LCodeGen;
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_LITHIUM_H_

src/crankshaft/mips/OWNERS
View File

@@ -1,3 +0,0 @@
ivica.bogosavljevic@imgtec.com
Miran.Karic@imgtec.com
dusan.simicic@imgtec.com

File diff suppressed because it is too large

src/crankshaft/mips/lithium-codegen-mips.h
View File

@@ -1,405 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/crankshaft/mips/lithium-mips.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
RAStatus GetRAState() const {
return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
Register object,
Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
MemOperand PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
int constant_key,
int element_size,
int shift_size,
int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true, Label* if_false,
Handle<String> class_name, Register input,
Register temporary, Register temporary2);
bool HasAllocatedStackSlots() const {
return chunk()->HasAllocatedStackSlots();
}
int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
int GetTotalFrameSlotCount() const {
return chunk()->GetTotalFrameSlotCount();
}
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
MemOperand BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding);
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr,
Condition condition,
Register src1,
const Operand& src2);
template<class InstrType>
void EmitBranchF(InstrType instr,
Condition condition,
FPURegister src1,
FPURegister src2);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
const Operand& src2);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
const Operand& src2);
template<class InstrType>
void EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
// Returns two registers in cmp1 and cmp2 that can be used in the
// Branch instruction after EmitTypeofIs.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name,
Register* cmp1,
Operand* cmp2);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
Label* is_not_string,
SmiCheck check_needed);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset,
AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
// All registers are clobbered.
// If 'remainder' is no_reg, it is not computed.
void EmitSignedIntegerDivisionByConstant(Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment);
void EnsureSpaceForLazyDeopt(int space_needed) override;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen);
~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_

src/crankshaft/mips/lithium-gap-resolver-mips.cc
View File

@@ -1,298 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/crankshaft/mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32, owner->zone()),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle, when doing a depth-first traversal of moves,
// by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
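// Worked example (illustrative; r1 and r2 are placeholder registers):
// resolving the two-move cycle {r1 -> r2, r2 -> r1}. PerformMove(0) marks
// r1 -> r2 pending and recurses into r2 -> r1, which blocks r2. That
// recursion finds the pending root move still blocking r1, so BreakCycle()
// copies r2 into kLithiumScratchReg and eliminates r2 -> r1. The now
// unblocked root move emits "mov r2, r1", and RestoreValue() completes the
// swap with "mov r1, kLithiumScratchReg".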
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
// We save in a register the value that should end up in the source of
// moves_[root_index]. After performing all moves in the tree rooted
// in that move, we save the value to that source.
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);
// Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ sw(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ lw(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
__ lwc1(kLithiumScratchDouble, source_operand);
__ swc1(kLithiumScratchDouble, destination_operand);
} else {
__ lw(at, source_operand);
__ sw(at, destination_operand);
}
} else {
__ lw(kLithiumScratchReg, source_operand);
__ sw(kLithiumScratchReg, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ li(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Move(result, v);
} else {
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg,
Operand(cgen_->ToRepresentation(constant_source, r)));
} else {
__ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
}
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ Sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kLithiumScratchDouble was used to break the cycle,
// but kLithiumScratchReg is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ lw(kLithiumScratchReg, source_operand);
__ sw(kLithiumScratchReg, destination_operand);
__ lw(kLithiumScratchReg, source_high_operand);
__ sw(kLithiumScratchReg, destination_high_operand);
} else {
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} // namespace internal
} // namespace v8
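The gap-resolver algorithm deleted here is repeated almost verbatim for each port (MIPS above, MIPS64 and PPC further down), so a compact restatement may help when reading the rest of this diff. The sketch below is not part of the removed V8 sources: GapResolverSketch, Move and the plain integer register file are invented for illustration only. It keeps the structure shown above, though: redundant moves are dropped up front, the move graph is walked depth-first, and the one cycle that can run through each root move is broken by parking a value in a scratch slot and restoring it once the root has been emitted.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

namespace sketch {

// One move of a parallel move; 'pending' stands in for the cleared
// destination used by the real resolver, 'eliminated' for Eliminate().
struct Move {
  int src;
  int dst;
  bool pending = false;
  bool eliminated = false;
};

class GapResolverSketch {
 public:
  explicit GapResolverSketch(std::vector<int>& regs) : regs_(regs) {}

  // Applies the moves as if they all read their sources at the same time.
  void Resolve(std::vector<Move> moves) {
    moves_ = std::move(moves);
    for (Move& m : moves_) {
      if (m.src == m.dst) m.eliminated = true;  // drop redundant moves
    }
    for (std::size_t i = 0; i < moves_.size(); ++i) {
      if (moves_[i].eliminated) continue;
      root_ = i;
      in_cycle_ = false;
      PerformMove(i);
      if (in_cycle_) regs_[saved_destination_] = scratch_;  // RestoreValue()
    }
  }

 private:
  void PerformMove(std::size_t index) {
    assert(!moves_[index].pending && !moves_[index].eliminated);
    moves_[index].pending = true;
    const int destination = moves_[index].dst;
    // Depth-first: first perform every move that reads our destination.
    for (std::size_t i = 0; i < moves_.size(); ++i) {
      if (!moves_[i].eliminated && !moves_[i].pending &&
          moves_[i].src == destination) {
        PerformMove(i);
      }
    }
    moves_[index].pending = false;
    // If the still-pending root reads our destination we have closed a
    // cycle: park the value that belongs there in the scratch slot
    // (BreakCycle) and let Resolve() write it back afterwards.
    if (moves_[root_].pending && !moves_[root_].eliminated &&
        moves_[root_].src == destination) {
      scratch_ = regs_[moves_[index].src];
      saved_destination_ = destination;
      in_cycle_ = true;
      moves_[index].eliminated = true;
      return;
    }
    regs_[destination] = regs_[moves_[index].src];  // EmitMove()
    moves_[index].eliminated = true;
  }

  std::vector<int>& regs_;
  std::vector<Move> moves_;
  std::size_t root_ = 0;
  bool in_cycle_ = false;
  int scratch_ = 0;
  int saved_destination_ = 0;
};

}  // namespace sketch

int main() {
  std::vector<int> regs = {10, 20, 30};  // r0, r1, r2
  sketch::GapResolverSketch resolver(regs);
  // Swap r0 and r1 (a cycle) and, in parallel, copy the old r1 into r2.
  resolver.Resolve({{/*src=*/1, /*dst=*/0}, {0, 1}, {1, 2}});
  std::printf("%d %d %d\n", regs[0], regs[1], regs[2]);  // prints: 20 10 20
  return 0;
}

The real resolvers layer two things on top of this skeleton: operands can be registers, stack slots, double registers, double stack slots or constants, and constant-source moves are postponed to a second pass in Resolve() because they can never block another move.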


@ -1,59 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver final BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,3 +0,0 @@
ivica.bogosavljevic@imgtec.com
Miran.Karic@imgtec.com
dusan.simicic@imgtec.com

File diff suppressed because it is too large


@ -1,408 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/crankshaft/mips64/lithium-mips64.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
RAStatus GetRAState() const {
return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
int64_t ToRepresentation_donotuse(LConstantOperand* op,
const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
Register object,
Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
MemOperand PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
int constant_key,
int element_size,
int shift_size,
int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true, Label* if_false,
Handle<String> class_name, Register input,
Register temporary, Register temporary2);
bool HasAllocatedStackSlots() const {
return chunk()->HasAllocatedStackSlots();
}
int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
int GetTotalFrameSlotCount() const {
return chunk()->GetTotalFrameSlotCount();
}
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
MemOperand BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding);
void EmitIntegerMathAbs(LMathAbs* instr);
void EmitSmiMathAbs(LMathAbs* instr);
// Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr,
Condition condition,
Register src1,
const Operand& src2);
template<class InstrType>
void EmitBranchF(InstrType instr,
Condition condition,
FPURegister src1,
FPURegister src2);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
const Operand& src2);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
const Operand& src2);
template<class InstrType>
void EmitFalseBranchF(InstrType instr,
Condition condition,
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
// Returns two registers in cmp1 and cmp2 that can be used in the
// Branch instruction after EmitTypeofIs.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name,
Register* cmp1,
Operand* cmp2);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input,
Register temp1,
Label* is_not_string,
SmiCheck check_needed);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset,
AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
// All registers are clobbered.
// If 'remainder' is no_reg, it is not computed.
void EmitSignedIntegerDivisionByConstant(Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment);
void EnsureSpaceForLazyDeopt(int space_needed) override;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen);
~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
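The LDeferredCode class at the end of this header is the hook Crankshaft used to keep slow paths out of the main instruction stream: an instruction visitor allocates a deferred-code object, branches to its entry label from the hot path, binds its exit label where execution should resume, and the bodies of all registered objects are emitted after the main body. The stand-alone analogue below is not V8 code; CodeGen, DeferredAllocate and the textual pseudo-assembler are made up purely to show the shape of the pattern.

#include <cstdio>
#include <string>
#include <vector>

namespace sketch {

class CodeGen;

// Mirrors the shape of LDeferredCode: an out-of-line piece of code that
// registers itself with the code generator on construction and is emitted
// after the main instruction stream, reachable only through entry().
class DeferredCode {
 public:
  explicit DeferredCode(CodeGen* codegen);
  virtual ~DeferredCode() = default;
  virtual void Generate() = 0;
  std::string entry() const { return Label("entry"); }
  std::string exit() const { return Label("exit"); }

 protected:
  CodeGen* codegen_;

 private:
  std::string Label(const char* kind) const {
    return "deferred_" + std::to_string(id_) + "_" + kind;
  }
  int id_ = 0;
};

class CodeGen {
 public:
  void Emit(const std::string& line) { code_.push_back(line); }
  int AddDeferredCode(DeferredCode* code) {
    deferred_.push_back(code);
    return static_cast<int>(deferred_.size()) - 1;
  }
  // The analogue of GenerateDeferredCode(): bodies go after the hot path.
  void EmitDeferredCode() {
    for (DeferredCode* code : deferred_) {
      Emit("bind  " + code->entry());
      code->Generate();
      Emit("b     " + code->exit());
    }
  }
  void Print() const {
    for (const std::string& line : code_) std::printf("%s\n", line.c_str());
  }

 private:
  std::vector<std::string> code_;
  std::vector<DeferredCode*> deferred_;
};

DeferredCode::DeferredCode(CodeGen* codegen) : codegen_(codegen) {
  id_ = codegen->AddDeferredCode(this);
}

// A deferred slow path in the spirit of DoDeferredAllocate(): the hot path
// tries an inline allocation and only falls back to the runtime here.
class DeferredAllocate : public DeferredCode {
 public:
  using DeferredCode::DeferredCode;
  void Generate() override { codegen_->Emit("call  AllocateInRuntimeSlowPath"); }
};

}  // namespace sketch

int main() {
  sketch::CodeGen cgen;
  sketch::DeferredAllocate slow_path(&cgen);
  cgen.Emit("alloc r0, 16");                // hot path: inline allocation
  cgen.Emit("bovf  " + slow_path.entry());  // branch to slow path on failure
  cgen.Emit("bind  " + slow_path.exit());   // slow path rejoins here
  cgen.Emit("ret");
  cgen.EmitDeferredCode();                  // out-of-line bodies come last
  cgen.Print();
  return 0;
}

The property that matters is visible in the printed listing: the slow path is emitted once, after the hot code, and is reachable only through its entry label, so the common case pays a single conditional branch.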


@ -1,299 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32, owner->zone()),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle when doing a depth-first traversal of moves,
// by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill the saved value to
// the stack, to free up the register.
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move, it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
// We save in a register the value that should end up in the source of
// moves_[root_index]. After performing all moves in the tree rooted
// in that move, we save the value to that source.
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ Ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);
// Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ Sd(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ Ld(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
} else {
__ Ld(at, source_operand);
__ Sd(at, destination_operand);
}
} else {
__ Ld(kLithiumScratchReg, source_operand);
__ Sd(kLithiumScratchReg, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsSmi(constant_source)) {
__ li(dst, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(dst, Operand(cgen_->ToInteger32(constant_source)));
} else {
__ li(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Move(result, v);
} else {
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else {
__ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ Sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kLithiumScratchDouble was used to break the cycle,
// but kLithiumScratchReg is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ Lw(kLithiumScratchReg, source_operand);
__ Sw(kLithiumScratchReg, destination_operand);
__ Lw(kLithiumScratchReg, source_high_operand);
__ Sw(kLithiumScratchReg, destination_high_operand);
} else {
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} // namespace internal
} // namespace v8


@ -1,59 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_
#include "src/crankshaft/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver final BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS_H_

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,6 +0,0 @@
jyan@ca.ibm.com
dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
bjaideep@ca.ibm.com

File diff suppressed because it is too large


@ -1,344 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/crankshaft/ppc/lithium-ppc.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen : public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
jump_table_(4, info->zone()),
scope_(info->scope()),
deferred_(8, info->zone()),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
LinkRegisterStatus GetLinkRegisterState() const {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LConstantOperand must be an Integer32 or Smi
void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
intptr_t ToRepresentation(LConstantOperand* op,
const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
LOperand* temp1, LOperand* temp2,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
Register object, Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
MemOperand PrepareKeyedOperand(Register key, Register base,
bool key_is_constant, bool key_is_tagged,
int constant_key, int element_size_shift,
int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratch; }
DoubleRegister double_scratch0() { return kScratchDoubleReg; }
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true, Label* if_false,
Handle<String> class_name, Register input,
Register temporary, Register temporary2);
bool HasAllocatedStackSlots() const {
return chunk()->HasAllocatedStackSlots();
}
int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
int GetTotalFrameSlotCount() const {
return chunk()->GetTotalFrameSlotCount();
}
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
LInstruction* instr, SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function, int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id, int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, function->nargs, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
void PrepareForTailCall(const ParameterCount& actual, Register scratch1,
Register scratch2, Register scratch3);
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
DeoptimizeReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
MemOperand BuildSeqStringOperand(Register string, LOperand* index,
String::Encoding encoding);
void EmitMathAbs(LMathAbs* instr);
#if V8_TARGET_ARCH_PPC64
void EmitInteger32MathAbs(LMathAbs* instr);
#endif
// Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
Safepoint::DeoptMode mode);
static Condition TokenToCondition(Token::Value op);
void EmitGoto(int block);
// EmitBranch expects to be the last instruction of a block.
template <class InstrType>
void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition,
CRegister cr = cr7);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
DoubleRegister result, NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
Handle<String> type_name);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
int* offset, AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt(int space_needed) override;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
Scope* const scope_;
ZoneList<LDeferredCode*> deferred_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen);
~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
};
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode : public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() {}
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} // namespace internal
} // namespace v8
#endif // V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_


@ -1,287 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
namespace v8 {
namespace internal {
static const Register kSavedValueRegister = {11};
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32, owner->zone()),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle when doing a depth-first traversal of moves,
// by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move, it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
// We save in a register the value that should end up in the source of
// moves_[root_index]. After performing all moves in the tree rooted
// in that move, we save the value to that source.
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ mr(kSavedValueRegister, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ fmr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ lfd(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);
// Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
if (saved_destination_->IsRegister()) {
__ mr(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
__ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ fmr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ stfd(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mr(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ StoreP(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ LoadP(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
__ LoadP(ip, source_operand);
__ StoreP(ip, destination_operand);
} else {
__ LoadP(kSavedValueRegister, source_operand);
__ StoreP(kSavedValueRegister, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsInteger32(constant_source)) {
cgen_->EmitLoadIntegerConstant(constant_source, dst);
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ LoadDoubleLiteral(result, v, ip);
} else {
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsInteger32(constant_source)) {
cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
} else {
__ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
}
__ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ fmr(cgen_->ToDoubleRegister(destination), source_register);
} else {
DCHECK(destination->IsDoubleStackSlot());
__ stfd(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ lfd(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kSavedDoubleValueRegister was used to break the cycle,
// but kSavedValueRegister is free.
#if V8_TARGET_ARCH_PPC64
__ ld(kSavedValueRegister, source_operand);
__ std(kSavedValueRegister, destination_operand);
#else
MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ lwz(kSavedValueRegister, source_operand);
__ stw(kSavedValueRegister, destination_operand);
__ lwz(kSavedValueRegister, source_high_operand);
__ stw(kSavedValueRegister, destination_high_operand);
#endif
} else {
__ lfd(kScratchDoubleReg, source_operand);
__ stfd(kScratchDoubleReg, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} // namespace internal
} // namespace v8
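Each of the deleted resolvers guards itself with the same Verify() routine under ENABLE_SLOW_DCHECKS. Restated as a stand-alone check (not V8 code; Location, MoveOperands and the sample operands below are made up), the invariant is simply that no location is written by more than one move of a parallel move:

#include <cassert>
#include <set>
#include <utility>
#include <vector>

using Location = std::pair<int, int>;                // e.g. {kind, index}
using MoveOperands = std::pair<Location, Location>;  // {source, destination}

void VerifyNoDuplicateDestinations(const std::vector<MoveOperands>& moves) {
  std::set<Location> destinations;
  for (const MoveOperands& move : moves) {
    // insert() tells us whether this destination was already claimed.
    const bool first_writer = destinations.insert(move.second).second;
    assert(first_writer && "two moves write the same destination");
    (void)first_writer;  // silence the unused-variable warning under NDEBUG
  }
}

int main() {
  // r1 <- r0 and s4 <- r0 are fine: one source may feed many destinations.
  VerifyNoDuplicateDestinations({{{0, 0}, {0, 1}}, {{0, 0}, {1, 4}}});
  return 0;
}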

Some files were not shown because too many files have changed in this diff