[turboshaft] add support for all JS machine-level operators

In particular, this CL adds support for:
- exception handling
- source positions
- OSR
- various numeric operations and conversions

Since the test suite now passes with `--turboshaft`, this also adds a
new variant for Turboshaft and enables it on some bots.

Bug: v8:12783
Change-Id: Ia2dd2e16f56fc955d49e51f86d050218e70cb575
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3669251
Reviewed-by: Darius Mercadier <dmercadier@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81074}
This commit is contained in:
Tobias Tebbi 2022-06-10 14:25:33 +00:00 committed by V8 LUCI CQ
parent c1a1c11378
commit 77ba98ef32
22 changed files with 1329 additions and 302 deletions

View File

@@ -2847,6 +2847,7 @@ filegroup(
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.cc",

View File

@@ -2919,6 +2919,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.h",
"src/compiler/typed-optimization.h",

View File

@@ -96,15 +96,12 @@ namespace internal {
\
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCyclicObjectStateDetectedInEscapeAnalysis, \
"Cyclic object state detected by escape analysis") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kFunctionTooBig, "Function is too big to be optimized") \
V(kTooManyArguments, "Function contains a call with too many arguments") \
V(kLiveEdit, "LiveEdit") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kNeverOptimize, "Optimization is always disabled")

View File

@@ -44,7 +44,8 @@ struct SourcePositionInfo;
// DeoptimizationData::InliningPositions, depending on the compilation stage.
class SourcePosition final {
public:
explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
explicit SourcePosition(int script_offset = kNoSourcePosition,
int inlining_id = kNotInlined)
: value_(0) {
SetIsExternal(false);
SetScriptOffset(script_offset);
@@ -57,11 +58,8 @@ class SourcePosition final {
return SourcePosition(line, file_id, kNotInlined);
}
static SourcePosition Unknown() { return SourcePosition(kNoSourcePosition); }
bool IsKnown() const {
if (IsExternal()) return true;
return ScriptOffset() != kNoSourcePosition || InliningId() != kNotInlined;
}
static SourcePosition Unknown() { return SourcePosition(); }
bool IsKnown() const { return raw() != SourcePosition::Unknown().raw(); }
bool isInlined() const {
if (IsExternal()) return false;
return InliningId() != kNotInlined;

View File

@@ -95,7 +95,7 @@ InstructionSelector::InstructionSelector(
}
}
bool InstructionSelector::SelectInstructions() {
base::Optional<BailoutReason> InstructionSelector::SelectInstructions() {
// Mark the inputs of all phis in loop headers as used.
BasicBlockVector* blocks = schedule()->rpo_order();
for (auto const block : *blocks) {
@@ -114,7 +114,8 @@ bool InstructionSelector::SelectInstructions() {
// Visit each basic block in post order.
for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
VisitBlock(*i);
if (instruction_selection_failed()) return false;
if (instruction_selection_failed())
return BailoutReason::kCodeGenerationFailed;
}
// Schedule the selected instructions.
@@ -145,7 +146,7 @@ bool InstructionSelector::SelectInstructions() {
#if DEBUG
sequence()->ValidateSSA();
#endif
return true;
return base::nullopt;
}
void InstructionSelector::StartBlock(RpoNumber rpo) {

View File

@@ -300,7 +300,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
base::Optional<BailoutReason> SelectInstructions();
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);

View File

@@ -13,6 +13,7 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/builtins/profile-data-reader.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
@@ -334,8 +335,6 @@ class PipelineData {
CompilationDependencies* dependencies() const { return dependencies_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
OsrHelper* osr_helper() { return &(*osr_helper_); }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
bool verify_graph() const { return verify_graph_; }
void set_verify_graph(bool value) { verify_graph_ = value; }
@@ -472,7 +471,6 @@ class PipelineData {
void DeleteGraphZone() {
if (graph_zone_ == nullptr) return;
graph_zone_scope_.Destroy();
graph_zone_ = nullptr;
graph_ = nullptr;
turboshaft_graph_ = nullptr;
@@ -485,6 +483,7 @@ class PipelineData {
jsgraph_ = nullptr;
mcgraph_ = nullptr;
schedule_ = nullptr;
graph_zone_scope_.Destroy();
}
void DeleteInstructionZone() {
@@ -626,7 +625,6 @@ class PipelineData {
bool may_have_unverifiable_graph_ = true;
ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
bool compilation_failed_ = false;
bool verify_graph_ = false;
int start_source_position_ = kNoSourcePosition;
base::Optional<OsrHelper> osr_helper_;
@@ -700,7 +698,7 @@ class PipelineImpl final {
// Helpers for executing pipeline phases.
template <typename Phase, typename... Args>
void Run(Args&&... args);
auto Run(Args&&... args);
// Step A.1. Initialize the heap broker.
void InitializeHeapBroker();
@@ -1309,7 +1307,7 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
}
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
auto PipelineImpl::Run(Args&&... args) {
#ifdef V8_RUNTIME_CALL_STATS
PipelineRunScope scope(this->data_, Phase::phase_name(),
Phase::kRuntimeCallCounterId, Phase::kCounterMode);
@@ -1317,7 +1315,7 @@ void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
#endif
Phase phase;
phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
return phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}
#ifdef V8_RUNTIME_CALL_STATS
@@ -2030,10 +2028,12 @@ struct BranchConditionDuplicationPhase {
struct BuildTurboshaftPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BuildTurboshaft)
void Run(PipelineData* data, Zone* temp_zone) {
turboshaft::BuildGraph(data->schedule(), data->graph_zone(), temp_zone,
&data->turboshaft_graph());
base::Optional<BailoutReason> Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = data->schedule();
data->reset_schedule();
return turboshaft::BuildGraph(schedule, data->graph_zone(), temp_zone,
&data->turboshaft_graph(),
data->source_positions());
}
};
@@ -2051,9 +2051,9 @@ struct TurboshaftRecreateSchedulePhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftRecreateSchedule)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
auto result = turboshaft::RecreateSchedule(data->turboshaft_graph(),
linkage->GetIncomingDescriptor(),
data->graph_zone(), temp_zone);
auto result = turboshaft::RecreateSchedule(
data->turboshaft_graph(), linkage->GetIncomingDescriptor(),
data->graph_zone(), temp_zone, data->source_positions());
data->set_graph(result.graph);
data->set_schedule(result.schedule);
}
@@ -2291,7 +2291,8 @@ std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
struct InstructionSelectionPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SelectInstructions)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
base::Optional<BailoutReason> Run(PipelineData* data, Zone* temp_zone,
Linkage* linkage) {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
@@ -2314,8 +2315,8 @@ struct InstructionSelectionPhase {
data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
if (base::Optional<BailoutReason> bailout = selector.SelectInstructions()) {
return bailout;
}
if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
@ -2325,6 +2326,7 @@ struct InstructionSelectionPhase {
&selector.instr_origins()}
<< "},\n";
}
return base::nullopt;
}
};
@@ -2855,12 +2857,6 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
if (FLAG_turbo_escape) {
Run<EscapeAnalysisPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(
BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
data->EndPhaseKind();
return false;
}
RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
}
@@ -2948,7 +2944,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
ComputeScheduledGraph();
if (FLAG_turboshaft) {
Run<BuildTurboshaftPhase>();
if (base::Optional<BailoutReason> bailout = Run<BuildTurboshaftPhase>()) {
info()->AbortOptimization(*bailout);
data->EndPhaseKind();
return false;
}
Run<PrintTurboshaftGraphPhase>(BuildTurboshaftPhase::phase_name());
Run<OptimizeTurboshaftPhase>();
@@ -3519,7 +3519,7 @@ std::unique_ptr<TurbofanCompilationJob> Pipeline::NewCompilationJob(
isolate, shared, function, osr_offset, osr_frame, code_kind);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
void Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool use_mid_tier_register_allocator,
bool run_verifier) {
@@ -3541,8 +3541,6 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
} else {
pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
}
return !data.compilation_failed();
}
void PipelineImpl::ComputeScheduledGraph() {
@@ -3616,9 +3614,9 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
data->InitializeFrameData(call_descriptor);
}
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
if (base::Optional<BailoutReason> bailout =
Run<InstructionSelectionPhase>(linkage)) {
info()->AbortOptimization(*bailout);
data->EndPhaseKind();
return false;
}
@@ -3687,12 +3685,6 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
VerifyGeneratedCodeIsIdempotent();
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(
BailoutReason::kNotEnoughVirtualRegistersRegalloc);
data->EndPhaseKind();
return false;
}
// TODO(mtrofin): move this off to the register allocator.
bool generate_frame_at_start =

View File

@@ -102,7 +102,7 @@ class Pipeline : public AllStatic {
const AssemblerOptions& options, Schedule* schedule = nullptr);
// Run just the register allocator phases.
V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
V8_EXPORT_PRIVATE static void AllocateRegistersForTesting(
const RegisterConfiguration* config, InstructionSequence* sequence,
bool use_fast_register_allocator, bool run_verifier);

View File

@@ -17,6 +17,7 @@
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
@@ -42,8 +43,6 @@ class AssemblerInterface : public Superclass {
left, right, OverflowCheckedBinopOp::Kind::kSignedAdd, rep);
}
OpIndex Sub(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kSub, rep);
}
OpIndex SubWithOverflow(OpIndex left, OpIndex right,
@ -54,6 +53,16 @@ class AssemblerInterface : public Superclass {
OpIndex Mul(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kMul, rep);
}
OpIndex SignedMulOverflownBits(OpIndex left, OpIndex right,
MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kSignedMulOverflownBits,
rep);
}
OpIndex UnsignedMulOverflownBits(OpIndex left, OpIndex right,
MachineRepresentation rep) {
return subclass().Binop(left, right,
BinopOp::Kind::kUnsignedMulOverflownBits, rep);
}
OpIndex MulWithOverflow(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
@ -61,6 +70,18 @@ class AssemblerInterface : public Superclass {
return subclass().OverflowCheckedBinop(
left, right, OverflowCheckedBinopOp::Kind::kSignedMul, rep);
}
OpIndex SignedDiv(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kSignedDiv, rep);
}
OpIndex UnsignedDiv(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kUnsignedDiv, rep);
}
OpIndex SignedMod(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kSignedMod, rep);
}
OpIndex UnsignedMod(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kUnsignedMod, rep);
}
OpIndex BitwiseAnd(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
@ -71,6 +92,22 @@ class AssemblerInterface : public Superclass {
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kBitwiseOr, rep);
}
OpIndex Min(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kMin, rep);
}
OpIndex Max(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kMax, rep);
}
OpIndex Power(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kPower, rep);
}
OpIndex Atan2(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kAtan2, rep);
}
OpIndex BitwiseXor(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
@ -81,6 +118,37 @@ class AssemblerInterface : public Superclass {
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftLeft, rep);
}
OpIndex ShiftRightArithmetic(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftRightArithmetic,
rep);
}
OpIndex ShiftRightArithmeticShiftOutZeros(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(
left, right, ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros, rep);
}
OpIndex ShiftRightLogical(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftRightLogical,
rep);
}
OpIndex RotateLeft(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kRotateLeft, rep);
}
OpIndex RotateRight(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kRotateRight, rep);
}
OpIndex Word32Constant(uint32_t value) {
return subclass().Constant(ConstantOp::Kind::kWord32, uint64_t{value});
}
@ -110,6 +178,13 @@ class AssemblerInterface : public Superclass {
MachineRepresentation::kWord32);
}
OpIndex ExceptionValueProjection(OpIndex value) {
return subclass().Projection(value, ProjectionOp::Kind::kExceptionValue, 0);
}
OpIndex TupleProjection(OpIndex value, uint16_t index) {
return subclass().Projection(value, ProjectionOp::Kind::kTuple, index);
}
private:
Subclass& subclass() { return *static_cast<Subclass*>(this); }
};
@ -140,6 +215,10 @@ class Assembler
return true;
}
void SetCurrentSourcePosition(SourcePosition position) {
current_source_position_ = position;
}
OpIndex Phi(base::Vector<const OpIndex> inputs, MachineRepresentation rep) {
DCHECK(current_block()->IsMerge() &&
inputs.size() == current_block()->Predecessors().size());
@ -163,6 +242,12 @@ class Assembler
return Base::Branch(condition, if_true, if_false);
}
OpIndex CatchException(OpIndex call, Block* if_success, Block* if_exception) {
if_success->AddPredecessor(current_block());
if_exception->AddPredecessor(current_block());
return Base::CatchException(call, if_success, if_exception);
}
OpIndex Switch(OpIndex input, base::Vector<const SwitchOp::Case> cases,
Block* default_case) {
for (SwitchOp::Case c : cases) {
@ -195,12 +280,16 @@ class Assembler
static_assert(!(std::is_same<Op, Operation>::value));
DCHECK_NOT_NULL(current_block_);
OpIndex result = graph().Add<Op>(args...);
if (current_source_position_.IsKnown()) {
graph().source_positions()[result] = current_source_position_;
}
if (Op::properties.is_block_terminator) FinalizeBlock();
return result;
}
Block* current_block_ = nullptr;
Graph& graph_;
SourcePosition current_source_position_ = SourcePosition::Unknown();
Zone* const phase_zone_;
};

View File

@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
#define V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
#include "src/common/globals.h"
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
@@ -14,8 +15,10 @@ struct FrameStateData {
enum class Instr : uint8_t {
kInput, // 1 Operand: input machine type
kUnusedRegister,
kDematerializedObject, // 2 Operands: id, field_count
kDematerializedObjectReference // 1 Operand: id
kDematerializedObject, // 2 Operands: id, field_count
kDematerializedObjectReference, // 1 Operand: id
kArgumentsElements, // 1 Operand: type
kArgumentsLength,
};
class Builder {
@@ -46,6 +49,15 @@ struct FrameStateData {
int_operands_.push_back(field_count);
}
void AddArgumentsElements(CreateArgumentsType type) {
instructions_.push_back(Instr::kArgumentsElements);
int_operands_.push_back(static_cast<int>(type));
}
void AddArgumentsLength() {
instructions_.push_back(Instr::kArgumentsLength);
}
const FrameStateData* AllocateFrameStateData(
const FrameStateInfo& frame_state_info, Zone* zone) {
return zone->New<FrameStateData>(FrameStateData{
@@ -100,6 +112,16 @@ struct FrameStateData {
*id = int_operands[0];
int_operands += 1;
}
void ConsumeArgumentsElements(CreateArgumentsType* type) {
DCHECK_EQ(instructions[0], Instr::kArgumentsElements);
instructions += 1;
*type = static_cast<CreateArgumentsType>(int_operands[0]);
int_operands += 1;
}
void ConsumeArgumentsLength() {
DCHECK_EQ(instructions[0], Instr::kArgumentsLength);
instructions += 1;
}
};
Iterator iterator(base::Vector<const OpIndex> state_values) const {

View File

@@ -8,11 +8,14 @@
#include <numeric>
#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/safe_conversions.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-aux-data.h"
#include "src/compiler/node-properties.h"
@@ -34,10 +37,12 @@ struct GraphBuilder {
Zone* phase_zone;
Schedule& schedule;
Assembler assembler;
SourcePositionTable* source_positions;
NodeAuxData<OpIndex> op_mapping{phase_zone};
ZoneVector<Block*> block_mapping{schedule.RpoBlockCount(), phase_zone};
void Run();
base::Optional<BailoutReason> Run();
private:
OpIndex Map(Node* old_node) {
@@ -81,6 +86,10 @@ struct GraphBuilder {
ProcessDeoptInput(builder, input->InputAt(i),
(*info.machine_types())[i]);
}
} else if (input->opcode() == IrOpcode::kArgumentsElementsState) {
builder->AddArgumentsElements(ArgumentsStateTypeOf(input->op()));
} else if (input->opcode() == IrOpcode::kArgumentsLengthState) {
builder->AddArgumentsLength();
} else {
builder->AddInput(type, Map(input));
}
@@ -134,7 +143,7 @@ struct GraphBuilder {
const base::SmallVector<int, 16>& predecessor_permutation);
};
void GraphBuilder::Run() {
base::Optional<BailoutReason> GraphBuilder::Run() {
for (BasicBlock* block : *schedule.rpo_order()) {
block_mapping[block->rpo_number()] = assembler.NewBlock(BlockKind(block));
}
@@ -158,10 +167,20 @@ void GraphBuilder::Run() {
});
for (Node* node : *block->nodes()) {
if (V8_UNLIKELY(node->InputCount() >=
int{std::numeric_limits<
decltype(Operation::input_count)>::max()})) {
return BailoutReason::kTooManyArguments;
}
OpIndex i = Process(node, block, predecessor_permutation);
op_mapping.Set(node, i);
}
if (Node* node = block->control_input()) {
if (V8_UNLIKELY(node->InputCount() >=
int{std::numeric_limits<
decltype(Operation::input_count)>::max()})) {
return BailoutReason::kTooManyArguments;
}
OpIndex i = Process(node, block, predecessor_permutation);
op_mapping.Set(node, i);
}
@@ -182,7 +201,15 @@ void GraphBuilder::Run() {
case BasicBlock::kDeoptimize:
case BasicBlock::kThrow:
break;
case BasicBlock::kCall:
case BasicBlock::kCall: {
Node* call = block->control_input();
DCHECK_EQ(call->opcode(), IrOpcode::kCall);
DCHECK_EQ(block->SuccessorCount(), 2);
Block* if_success = Map(block->SuccessorAt(0));
Block* if_exception = Map(block->SuccessorAt(1));
assembler.CatchException(Map(call), if_success, if_exception);
break;
}
case BasicBlock::kTailCall:
UNIMPLEMENTED();
case BasicBlock::kNone:
@ -190,11 +217,16 @@ void GraphBuilder::Run() {
}
DCHECK_NULL(assembler.current_block());
}
return base::nullopt;
}
OpIndex GraphBuilder::Process(
Node* node, BasicBlock* block,
const base::SmallVector<int, 16>& predecessor_permutation) {
if (source_positions) {
assembler.SetCurrentSourcePosition(
source_positions->GetSourcePosition(node));
}
const Operator* op = node->op();
Operator::Opcode opcode = op->opcode();
switch (opcode) {
@@ -205,18 +237,29 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kIfFalse:
case IrOpcode::kIfDefault:
case IrOpcode::kIfValue:
case IrOpcode::kStateValues:
case IrOpcode::kTypedStateValues:
case IrOpcode::kObjectId:
case IrOpcode::kTypedObjectState:
case IrOpcode::kArgumentsElementsState:
case IrOpcode::kArgumentsLengthState:
case IrOpcode::kEffectPhi:
case IrOpcode::kTerminate:
case IrOpcode::kIfSuccess:
return OpIndex::Invalid();
case IrOpcode::kIfException:
return assembler.ExceptionValueProjection(Map(node->InputAt(0)));
case IrOpcode::kParameter: {
const ParameterInfo& info = ParameterInfoOf(op);
return assembler.Parameter(info.index(), info.debug_name());
}
case IrOpcode::kOsrValue: {
return assembler.OsrValue(OsrValueIndexOf(op));
}
case IrOpcode::kPhi: {
int input_count = op->ValueInputCount();
MachineRepresentation rep = PhiRepresentationOf(op);
@@ -306,23 +349,35 @@ OpIndex GraphBuilder::Process(
rep);
}
case IrOpcode::kWord32Shr:
return assembler.ShiftRightLogical(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Shr:
case IrOpcode::kWord32Shr: {
MachineRepresentation rep = opcode == IrOpcode::kWord64Shr
? MachineRepresentation::kWord64
: MachineRepresentation::kWord32;
return assembler.Shift(Map(node->InputAt(0)), Map(node->InputAt(1)),
ShiftOp::Kind::kShiftRightLogical, rep);
}
return assembler.ShiftRightLogical(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord32Shl:
return assembler.ShiftLeft(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Shl:
case IrOpcode::kWord32Shl: {
MachineRepresentation rep = opcode == IrOpcode::kWord64Shl
? MachineRepresentation::kWord64
: MachineRepresentation::kWord32;
return assembler.Shift(Map(node->InputAt(0)), Map(node->InputAt(1)),
ShiftOp::Kind::kShiftLeft, rep);
}
return assembler.ShiftLeft(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord32Rol:
return assembler.RotateLeft(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Rol:
return assembler.RotateLeft(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord32Ror:
return assembler.RotateRight(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Ror:
return assembler.RotateRight(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord32Equal:
return assembler.Equal(Map(node->InputAt(0)), Map(node->InputAt(1)),
@@ -411,6 +466,14 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kInt32Mul:
return assembler.Mul(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt32MulHigh:
return assembler.SignedMulOverflownBits(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kUint32MulHigh:
return assembler.UnsignedMulOverflownBits(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt32MulWithOverflow:
return assembler.MulWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
@ -418,6 +481,47 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kInt64Mul:
return assembler.Mul(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat64Mul:
return assembler.Mul(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Mul:
return assembler.Mul(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kInt32Div:
return assembler.SignedDiv(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kUint32Div:
return assembler.UnsignedDiv(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt64Div:
return assembler.SignedDiv(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kUint64Div:
return assembler.UnsignedDiv(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat32Div:
return assembler.SignedDiv(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Div:
return assembler.SignedDiv(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kInt32Mod:
return assembler.SignedMod(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kUint32Mod:
return assembler.UnsignedMod(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt64Mod:
return assembler.SignedMod(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kUint64Mod:
return assembler.UnsignedMod(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat64Mod:
return assembler.SignedMod(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kInt32Sub:
return assembler.Sub(Map(node->InputAt(0)), Map(node->InputAt(1)),
@ -433,6 +537,49 @@ OpIndex GraphBuilder::Process(
return assembler.SubWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat64Sub:
return assembler.Sub(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Sub:
return assembler.Sub(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Min:
return assembler.Min(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Min:
return assembler.Min(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Max:
return assembler.Max(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Max:
return assembler.Max(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Pow:
return assembler.Power(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Atan2:
return assembler.Atan2(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kWord32ReverseBytes:
return assembler.IntegerUnary(Map(node->InputAt(0)),
IntegerUnaryOp::Kind::kReverseBytes,
MachineRepresentation::kWord32);
case IrOpcode::kWord64ReverseBytes:
return assembler.IntegerUnary(Map(node->InputAt(0)),
IntegerUnaryOp::Kind::kReverseBytes,
MachineRepresentation::kWord64);
case IrOpcode::kWord32Clz:
return assembler.IntegerUnary(Map(node->InputAt(0)),
IntegerUnaryOp::Kind::kCountLeadingZeros,
MachineRepresentation::kWord32);
case IrOpcode::kWord64Clz:
return assembler.IntegerUnary(Map(node->InputAt(0)),
IntegerUnaryOp::Kind::kCountLeadingZeros,
MachineRepresentation::kWord64);
case IrOpcode::kFloat32Abs:
return assembler.FloatUnary(Map(node->InputAt(0)),
@ -457,6 +604,102 @@ OpIndex GraphBuilder::Process(
FloatUnaryOp::Kind::kSilenceNaN,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32RoundDown:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundDown,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64RoundDown:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundDown,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32RoundUp:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundUp,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64RoundUp:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundUp,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32RoundTruncate:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundToZero,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64RoundTruncate:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundToZero,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32RoundTiesEven:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundTiesEven,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64RoundTiesEven:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kRoundTiesEven,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Log:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kLog,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Sqrt:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kSqrt,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Sqrt:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kSqrt,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Exp:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kExp,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Expm1:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kExpm1,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Sin:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kSin,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Cos:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kCos,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Sinh:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kSinh,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Cosh:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kCosh,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Asin:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kAsin,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Acos:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kAcos,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Asinh:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kAsinh,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Acosh:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kAcosh,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Tan:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kTan,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Tanh:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kTanh,
MachineRepresentation::kFloat64);
case IrOpcode::kTruncateInt64ToInt32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kIntegerTruncate,
@ -465,6 +708,22 @@ OpIndex GraphBuilder::Process(
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::kBitcast,
MachineRepresentation::kWord32,
MachineRepresentation::kWord64);
case IrOpcode::kBitcastFloat32ToInt32:
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::kBitcast,
MachineRepresentation::kFloat32,
MachineRepresentation::kWord32);
case IrOpcode::kBitcastInt32ToFloat32:
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::kBitcast,
MachineRepresentation::kWord32,
MachineRepresentation::kFloat32);
case IrOpcode::kBitcastFloat64ToInt64:
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::kBitcast,
MachineRepresentation::kFloat64,
MachineRepresentation::kWord64);
case IrOpcode::kBitcastInt64ToFloat64:
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::kBitcast,
MachineRepresentation::kWord64,
MachineRepresentation::kFloat64);
case IrOpcode::kChangeUint32ToUint64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kZeroExtend,
@ -503,6 +762,14 @@ OpIndex GraphBuilder::Process(
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kUnsignedFloatTruncate,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kTruncateFloat64ToFloat32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kFloatConversion,
MachineRepresentation::kFloat64, MachineRepresentation::kFloat32);
case IrOpcode::kChangeFloat32ToFloat64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kFloatConversion,
MachineRepresentation::kFloat32, MachineRepresentation::kFloat64);
case IrOpcode::kRoundFloat64ToInt32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignedFloatTruncate,
@ -532,6 +799,15 @@ OpIndex GraphBuilder::Process(
Map(node->InputAt(0)), ChangeOp::Kind::kExtractHighHalf,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kFloat64InsertLowWord32:
return assembler.Float64InsertWord32(
Map(node->InputAt(0)), Map(node->InputAt(1)),
Float64InsertWord32Op::Kind::kLowHalf);
case IrOpcode::kFloat64InsertHighWord32:
return assembler.Float64InsertWord32(
Map(node->InputAt(0)), Map(node->InputAt(1)),
Float64InsertWord32Op::Kind::kHighHalf);
case IrOpcode::kBitcastTaggedToWord:
return assembler.TaggedBitcast(Map(node->InputAt(0)),
MachineRepresentation::kTagged,
@ -541,62 +817,81 @@ OpIndex GraphBuilder::Process(
MachineType::PointerRepresentation(),
MachineRepresentation::kTagged);
case IrOpcode::kLoad: {
case IrOpcode::kLoad:
case IrOpcode::kUnalignedLoad: {
MachineType loaded_rep = LoadRepresentationOf(op);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
LoadOp::Kind kind = opcode == IrOpcode::kLoad
? LoadOp::Kind::kRawAligned
: LoadOp::Kind::kRawUnaligned;
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
return assembler.Load(Map(base), LoadOp::Kind::kRaw, loaded_rep,
offset);
return assembler.Load(Map(base), kind, loaded_rep, offset);
}
if (index->opcode() == IrOpcode::kInt64Constant) {
int64_t offset = OpParameter<int64_t>(index->op());
if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
return assembler.Load(Map(base), LoadOp::Kind::kRaw, loaded_rep,
return assembler.Load(Map(base), kind, loaded_rep,
static_cast<int32_t>(offset));
}
}
int32_t offset = 0;
uint8_t element_size_log2 = 0;
return assembler.IndexedLoad(Map(base), Map(index),
IndexedLoadOp::Kind::kRaw, loaded_rep,
return assembler.IndexedLoad(Map(base), Map(index), kind, loaded_rep,
offset, element_size_log2);
}
case IrOpcode::kStore: {
StoreRepresentation store_rep = StoreRepresentationOf(op);
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore: {
bool aligned = opcode == IrOpcode::kStore;
StoreRepresentation store_rep =
aligned ? StoreRepresentationOf(op)
: StoreRepresentation(UnalignedStoreRepresentationOf(op),
WriteBarrierKind::kNoWriteBarrier);
StoreOp::Kind kind =
aligned ? StoreOp::Kind::kRawAligned : StoreOp::Kind::kRawUnaligned;
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
return assembler.Store(Map(base), Map(value), StoreOp::Kind::kRaw,
return assembler.Store(Map(base), Map(value), kind,
store_rep.representation(),
store_rep.write_barrier_kind(), offset);
}
if (index->opcode() == IrOpcode::kInt64Constant) {
int64_t offset = OpParameter<int64_t>(index->op());
if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
return assembler.Store(Map(base), Map(value), StoreOp::Kind::kRaw,
store_rep.representation(),
store_rep.write_barrier_kind(),
static_cast<int32_t>(offset));
return assembler.Store(
Map(base), Map(value), kind, store_rep.representation(),
store_rep.write_barrier_kind(), static_cast<int32_t>(offset));
}
}
int32_t offset = 0;
uint8_t element_size_log2 = 0;
return assembler.IndexedStore(
Map(base), Map(index), Map(value), IndexedStoreOp::Kind::kRaw,
store_rep.representation(), store_rep.write_barrier_kind(), offset,
element_size_log2);
Map(base), Map(index), Map(value), kind, store_rep.representation(),
store_rep.write_barrier_kind(), offset, element_size_log2);
}
case IrOpcode::kRetain:
return assembler.Retain(Map(node->InputAt(0)));
case IrOpcode::kStackPointerGreaterThan:
return assembler.StackPointerGreaterThan(Map(node->InputAt(0)),
StackCheckKindOf(op));
case IrOpcode::kLoadStackCheckOffset:
return assembler.LoadStackCheckOffset();
return assembler.FrameConstant(FrameConstantOp::Kind::kStackCheckOffset);
case IrOpcode::kLoadFramePointer:
return assembler.FrameConstant(FrameConstantOp::Kind::kFramePointer);
case IrOpcode::kLoadParentFramePointer:
return assembler.FrameConstant(
FrameConstantOp::Kind::kParentFramePointer);
case IrOpcode::kStackSlot:
return assembler.StackSlot(StackSlotRepresentationOf(op).size(),
StackSlotRepresentationOf(op).alignment());
case IrOpcode::kBranch:
DCHECK_EQ(block->SuccessorCount(), 2);
@ -664,15 +959,11 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kReturn: {
Node* pop_count = node->InputAt(0);
if (pop_count->opcode() != IrOpcode::kInt32Constant) {
UNIMPLEMENTED();
}
base::SmallVector<OpIndex, 4> return_values;
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
return_values.push_back(Map(node->InputAt(i)));
}
return assembler.Return(base::VectorOf(return_values),
OpParameter<int32_t>(pop_count->op()));
return assembler.Return(Map(pop_count), base::VectorOf(return_values));
}
case IrOpcode::kUnreachable:
@ -686,23 +977,8 @@ OpIndex GraphBuilder::Process(
case IrOpcode::kProjection: {
Node* input = node->InputAt(0);
size_t index = ProjectionIndexOf(op);
switch (input->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt32MulWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
case IrOpcode::kInt64SubWithOverflow:
if (index == 0) {
return assembler.Projection(Map(input),
ProjectionOp::Kind::kResult);
} else {
DCHECK_EQ(index, 1);
return assembler.Projection(Map(input),
ProjectionOp::Kind::kOverflowBit);
}
default:
UNIMPLEMENTED();
}
return assembler.Projection(Map(input), ProjectionOp::Kind::kTuple,
index);
}
default:
@ -714,9 +990,11 @@ OpIndex GraphBuilder::Process(
} // namespace
void BuildGraph(Schedule* schedule, Zone* graph_zone, Zone* phase_zone,
Graph* graph) {
GraphBuilder{graph_zone, phase_zone, *schedule, Assembler(graph, phase_zone)}
base::Optional<BailoutReason> BuildGraph(
Schedule* schedule, Zone* graph_zone, Zone* phase_zone, Graph* graph,
SourcePositionTable* source_positions) {
return GraphBuilder{graph_zone, phase_zone, *schedule,
Assembler(graph, phase_zone), source_positions}
.Run();
}

View File

@ -5,14 +5,17 @@
#ifndef V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
#define V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
#include "src/codegen/bailout-reason.h"
#include "src/compiler/turboshaft/graph.h"
namespace v8::internal::compiler {
class Schedule;
class SourcePositionTable;
}
namespace v8::internal::compiler::turboshaft {
void BuildGraph(Schedule* schedule, Zone* graph_zone, Zone* phase_zone,
Graph* graph);
base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
Zone* phase_zone, Graph* graph,
SourcePositionTable* source_positions);
}
#endif // V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_

View File

@ -14,7 +14,9 @@
#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/sidetable.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
@ -268,12 +270,14 @@ class Graph {
: operations_(graph_zone, initial_capacity),
bound_blocks_(graph_zone),
all_blocks_(graph_zone),
graph_zone_(graph_zone) {}
graph_zone_(graph_zone),
source_positions_(graph_zone) {}
// Reset the graph to recycle its memory.
void Reset() {
operations_.Reset();
bound_blocks_.clear();
source_positions_.Reset();
next_block_ = 0;
}
@ -474,6 +478,13 @@ class Graph {
bool IsValid(OpIndex i) const { return i < next_operation_index(); }
const GrowingSidetable<SourcePosition>& source_positions() const {
return source_positions_;
}
GrowingSidetable<SourcePosition>& source_positions() {
return source_positions_;
}
Graph& GetOrCreateCompanion() {
if (!companion_) {
companion_ = std::make_unique<Graph>(graph_zone_, operations_.size());
@ -493,6 +504,7 @@ class Graph {
std::swap(all_blocks_, companion.all_blocks_);
std::swap(next_block_, companion.next_block_);
std::swap(graph_zone_, companion.graph_zone_);
std::swap(source_positions_, companion.source_positions_);
#ifdef DEBUG
// Update generation index.
DCHECK_EQ(generation_ + 1, companion.generation_);
@ -513,6 +525,8 @@ class Graph {
ZoneVector<Block*> all_blocks_;
size_t next_block_ = 0;
Zone* graph_zone_;
GrowingSidetable<SourcePosition> source_positions_;
std::unique_ptr<Graph> companion_ = {};
#ifdef DEBUG
size_t generation_ = 1;

View File

@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/compiler/frame-states.h"
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
@ -38,6 +39,15 @@ std::ostream& operator<<(std::ostream& os, OperationPrintStyle styled_op) {
return os;
}
// Prints a human-readable name of an integer unary operation kind, used when
// dumping the turboshaft graph.
std::ostream& operator<<(std::ostream& os, IntegerUnaryOp::Kind kind) {
  const char* name = nullptr;
  switch (kind) {
    case IntegerUnaryOp::Kind::kReverseBytes:
      name = "ReverseBytes";
      break;
    case IntegerUnaryOp::Kind::kCountLeadingZeros:
      name = "CountLeadingZeros";
      break;
  }
  return os << name;
}
std::ostream& operator<<(std::ostream& os, FloatUnaryOp::Kind kind) {
switch (kind) {
case FloatUnaryOp::Kind::kAbs:
@ -46,6 +56,42 @@ std::ostream& operator<<(std::ostream& os, FloatUnaryOp::Kind kind) {
return os << "Negate";
case FloatUnaryOp::Kind::kSilenceNaN:
return os << "SilenceNaN";
case FloatUnaryOp::Kind::kRoundUp:
return os << "RoundUp";
case FloatUnaryOp::Kind::kRoundDown:
return os << "RoundDown";
case FloatUnaryOp::Kind::kRoundToZero:
return os << "RoundToZero";
case FloatUnaryOp::Kind::kRoundTiesEven:
return os << "RoundTiesEven";
case FloatUnaryOp::Kind::kLog:
return os << "Log";
case FloatUnaryOp::Kind::kSqrt:
return os << "Sqrt";
case FloatUnaryOp::Kind::kExp:
return os << "Exp";
case FloatUnaryOp::Kind::kExpm1:
return os << "Expm1";
case FloatUnaryOp::Kind::kSin:
return os << "Sin";
case FloatUnaryOp::Kind::kCos:
return os << "Cos";
case FloatUnaryOp::Kind::kAsin:
return os << "Asin";
case FloatUnaryOp::Kind::kAcos:
return os << "Acos";
case FloatUnaryOp::Kind::kSinh:
return os << "Sinh";
case FloatUnaryOp::Kind::kCosh:
return os << "Cosh";
case FloatUnaryOp::Kind::kAsinh:
return os << "Asinh";
case FloatUnaryOp::Kind::kAcosh:
return os << "Acosh";
case FloatUnaryOp::Kind::kTan:
return os << "Tan";
case FloatUnaryOp::Kind::kTanh:
return os << "Tanh";
}
}
@ -59,6 +105,10 @@ std::ostream& operator<<(std::ostream& os, ShiftOp::Kind kind) {
return os << "ShiftRightLogical";
case ShiftOp::Kind::kShiftLeft:
return os << "ShiftLeft";
case ShiftOp::Kind::kRotateRight:
return os << "RotateRight";
case ShiftOp::Kind::kRotateLeft:
return os << "RotateLeft";
}
}
@ -83,6 +133,8 @@ std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
return os << "UnsignedNarrowing";
case ChangeOp::Kind::kIntegerTruncate:
return os << "IntegerTruncate";
case ChangeOp::Kind::kFloatConversion:
return os << "FloatConversion";
case ChangeOp::Kind::kSignedFloatTruncate:
return os << "SignedFloatTruncate";
case ChangeOp::Kind::kUnsignedFloatTruncate:
@ -106,12 +158,32 @@ std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
}
}
// Prints which half of the float64 bit pattern a Float64InsertWord32 replaces.
std::ostream& operator<<(std::ostream& os, Float64InsertWord32Op::Kind kind) {
  const char* name = nullptr;
  switch (kind) {
    case Float64InsertWord32Op::Kind::kLowHalf:
      name = "LowHalf";
      break;
    case Float64InsertWord32Op::Kind::kHighHalf:
      name = "HighHalf";
      break;
  }
  return os << name;
}
// Prints a human-readable name of a projection kind for graph dumps.
// Note: the stale `kOverflowBit`/`kResult` cases were removed; they no longer
// exist in ProjectionOp::Kind, which now only has kExceptionValue and kTuple.
std::ostream& operator<<(std::ostream& os, ProjectionOp::Kind kind) {
  switch (kind) {
    case ProjectionOp::Kind::kTuple:
      return os << "tuple";
    case ProjectionOp::Kind::kExceptionValue:
      return os << "exception value";
  }
}
// Prints a human-readable name of a frame-constant kind for graph dumps.
std::ostream& operator<<(std::ostream& os, FrameConstantOp::Kind kind) {
  const char* name = nullptr;
  switch (kind) {
    case FrameConstantOp::Kind::kStackCheckOffset:
      name = "stack check offset";
      break;
    case FrameConstantOp::Kind::kFramePointer:
      name = "frame pointer";
      break;
    case FrameConstantOp::Kind::kParentFramePointer:
      name = "parent frame pointer";
      break;
  }
  return os << name;
}
@ -169,14 +241,8 @@ void ConstantOp::PrintOptions(std::ostream& os) const {
void LoadOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << loaded_rep;
if (offset != 0) os << ", offset: " << offset;
os << "]";
@ -190,14 +256,8 @@ void ParameterOp::PrintOptions(std::ostream& os) const {
void IndexedLoadOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << loaded_rep;
if (element_size_log2 != 0)
os << ", element size: 2^" << int{element_size_log2};
@ -207,14 +267,8 @@ void IndexedLoadOp::PrintOptions(std::ostream& os) const {
void StoreOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << stored_rep;
os << ", " << write_barrier;
if (offset != 0) os << ", offset: " << offset;
@ -223,14 +277,8 @@ void StoreOp::PrintOptions(std::ostream& os) const {
void IndexedStoreOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << stored_rep;
os << ", " << write_barrier;
if (element_size_log2 != 0)
@ -273,6 +321,17 @@ void FrameStateOp::PrintOptions(std::ostream& os) const {
os << "$" << id;
break;
}
case FrameStateData::Instr::kArgumentsElements: {
CreateArgumentsType type;
it.ConsumeArgumentsElements(&type);
os << "ArgumentsElements(" << type << ")";
break;
}
case FrameStateData::Instr::kArgumentsLength: {
it.ConsumeArgumentsLength();
os << "ArgumentsLength";
break;
}
}
}
os << "]";
@ -282,22 +341,52 @@ void BinopOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kAdd:
os << "add, ";
os << "Add, ";
break;
case Kind::kSub:
os << "sub, ";
os << "Sub, ";
break;
case Kind::kMul:
os << "signed mul, ";
os << "Mul, ";
break;
case Kind::kSignedMulOverflownBits:
os << "SignedMulOverflownBits, ";
break;
case Kind::kUnsignedMulOverflownBits:
os << "UnsignedMulOverflownBits, ";
break;
case Kind::kSignedDiv:
os << "SignedDiv, ";
break;
case Kind::kUnsignedDiv:
os << "UnsignedDiv, ";
break;
case Kind::kSignedMod:
os << "SignedMod, ";
break;
case Kind::kUnsignedMod:
os << "UnsignedMod, ";
break;
case Kind::kBitwiseAnd:
os << "bitwise and, ";
os << "BitwiseAnd, ";
break;
case Kind::kBitwiseOr:
os << "bitwise or, ";
os << "BitwiseOr, ";
break;
case Kind::kBitwiseXor:
os << "bitwise xor, ";
os << "BitwiseXor, ";
break;
case Kind::kMin:
os << "Min, ";
break;
case Kind::kMax:
os << "Max, ";
break;
case Kind::kPower:
os << "Power, ";
break;
case Kind::kAtan2:
os << "Atan2, ";
break;
}
os << rep;

View File

@ -61,11 +61,13 @@ class Graph;
#define TURBOSHAFT_OPERATION_LIST(V) \
V(Binop) \
V(OverflowCheckedBinop) \
V(IntegerUnary) \
V(FloatUnary) \
V(Shift) \
V(Equal) \
V(Comparison) \
V(Change) \
V(Float64InsertWord32) \
V(TaggedBitcast) \
V(PendingLoopPhi) \
V(Constant) \
@ -73,10 +75,13 @@ class Graph;
V(IndexedLoad) \
V(Store) \
V(IndexedStore) \
V(Retain) \
V(Parameter) \
V(OsrValue) \
V(Goto) \
V(StackPointerGreaterThan) \
V(LoadStackCheckOffset) \
V(StackSlot) \
V(FrameConstant) \
V(CheckLazyDeopt) \
V(Deoptimize) \
V(DeoptimizeIf) \
@ -86,6 +91,7 @@ class Graph;
V(Unreachable) \
V(Return) \
V(Branch) \
V(CatchException) \
V(Switch) \
V(Projection)
@ -438,10 +444,20 @@ struct BinopOp : FixedArityOperationT<2, BinopOp> {
enum class Kind : uint8_t {
kAdd,
kMul,
kSignedMulOverflownBits,
kUnsignedMulOverflownBits,
kBitwiseAnd,
kBitwiseOr,
kBitwiseXor,
kMin,
kMax,
kSub,
kSignedDiv,
kUnsignedDiv,
kSignedMod,
kUnsignedMod,
kPower,
kAtan2,
};
Kind kind;
MachineRepresentation rep;
@ -455,11 +471,21 @@ struct BinopOp : FixedArityOperationT<2, BinopOp> {
switch (kind) {
case Kind::kAdd:
case Kind::kMul:
case Kind::kSignedMulOverflownBits:
case Kind::kUnsignedMulOverflownBits:
case Kind::kBitwiseAnd:
case Kind::kBitwiseOr:
case Kind::kBitwiseXor:
case Kind::kMin:
case Kind::kMax:
return true;
case Kind::kSub:
case Kind::kSignedDiv:
case Kind::kUnsignedDiv:
case Kind::kSignedMod:
case Kind::kUnsignedMod:
case Kind::kPower:
case Kind::kAtan2:
return false;
}
}
@ -474,8 +500,18 @@ struct BinopOp : FixedArityOperationT<2, BinopOp> {
case Kind::kBitwiseAnd:
case Kind::kBitwiseOr:
case Kind::kBitwiseXor:
case Kind::kMin:
case Kind::kMax:
return true;
case Kind::kSignedMulOverflownBits:
case Kind::kUnsignedMulOverflownBits:
case Kind::kSub:
case Kind::kSignedDiv:
case Kind::kUnsignedDiv:
case Kind::kSignedMod:
case Kind::kUnsignedMod:
case Kind::kPower:
case Kind::kAtan2:
return false;
}
}
@ -490,6 +526,20 @@ struct BinopOp : FixedArityOperationT<2, BinopOp> {
case Kind::kBitwiseXor:
case Kind::kSub:
return true;
case Kind::kSignedMulOverflownBits:
case Kind::kUnsignedMulOverflownBits:
case Kind::kSignedDiv:
case Kind::kUnsignedDiv:
case Kind::kSignedMod:
case Kind::kUnsignedMod:
return false;
case Kind::kMin:
case Kind::kMax:
case Kind::kPower:
case Kind::kAtan2:
// Doesn't apply to operations only supported on floating-point
// representations.
UNREACHABLE();
}
}
@ -531,8 +581,47 @@ struct OverflowCheckedBinopOp
void PrintOptions(std::ostream& os) const;
};
// Unary integer operation on a single word32/word64 input (the graph builder
// creates these for Word32/64ReverseBytes and Word32/64Clz).
struct IntegerUnaryOp : FixedArityOperationT<1, IntegerUnaryOp> {
  enum class Kind : uint8_t {
    kReverseBytes,        // Byte-swap the value.
    kCountLeadingZeros,   // Count leading zero bits.
  };
  Kind kind;
  // Word width of the operation; the graph builder passes kWord32 or kWord64.
  MachineRepresentation rep;
  // No side effects and no inputs other than the value: freely reorderable.
  static constexpr OpProperties properties = OpProperties::Pure();

  OpIndex input() const { return Base::input(0); }

  explicit IntegerUnaryOp(OpIndex input, Kind kind, MachineRepresentation rep)
      : Base(input), kind(kind), rep(rep) {}
  // Options participate in operation equality/printing.
  auto options() const { return std::tuple{kind, rep}; }
};
std::ostream& operator<<(std::ostream& os, IntegerUnaryOp::Kind kind);
struct FloatUnaryOp : FixedArityOperationT<1, FloatUnaryOp> {
enum class Kind : uint8_t { kAbs, kNegate, kSilenceNaN };
enum class Kind : uint8_t {
kAbs,
kNegate,
kSilenceNaN,
kRoundDown, // round towards -infinity
kRoundUp, // round towards +infinity
kRoundToZero, // round towards 0
kRoundTiesEven, // break ties by rounding towards the next even number
kLog,
kSqrt,
kExp,
kExpm1,
kSin,
kCos,
kSinh,
kCosh,
kAcos,
kAsin,
kAsinh,
kAcosh,
kTan,
kTanh,
};
Kind kind;
MachineRepresentation rep;
static constexpr OpProperties properties = OpProperties::Pure();
@ -550,7 +639,9 @@ struct ShiftOp : FixedArityOperationT<2, ShiftOp> {
kShiftRightArithmeticShiftOutZeros,
kShiftRightArithmetic,
kShiftRightLogical,
kShiftLeft
kShiftLeft,
kRotateRight,
kRotateLeft
};
Kind kind;
MachineRepresentation rep;
@ -567,10 +658,11 @@ struct ShiftOp : FixedArityOperationT<2, ShiftOp> {
case Kind::kShiftRightLogical:
return true;
case Kind::kShiftLeft:
case Kind::kRotateRight:
case Kind::kRotateLeft:
return false;
}
}
static bool IsLeftShift(Kind kind) { return !IsRightShift(kind); }
ShiftOp(OpIndex left, OpIndex right, Kind kind, MachineRepresentation rep)
: Base(left, right), kind(kind), rep(rep) {}
@ -626,6 +718,8 @@ struct ChangeOp : FixedArityOperationT<1, ChangeOp> {
kUnsignedNarrowing,
// reduce integer bit-width, resulting in a modulo operation
kIntegerTruncate,
// convert between different floating-point types
kFloatConversion,
// system-specific conversion to (un)signed number
kSignedFloatTruncate,
kUnsignedFloatTruncate,
@ -660,6 +754,22 @@ struct ChangeOp : FixedArityOperationT<1, ChangeOp> {
};
std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind);
// TODO(tebbi): Unify with other operations.
// Replaces the low or high 32-bit half of a float64's bit pattern with the
// given word32, producing a new float64 (maps the TurboFan
// Float64Insert{Low,High}Word32 nodes).
struct Float64InsertWord32Op : FixedArityOperationT<2, Float64InsertWord32Op> {
  enum class Kind { kLowHalf, kHighHalf };
  Kind kind;  // Which half of the float64 is replaced.
  static constexpr OpProperties properties = OpProperties::Pure();

  OpIndex float64() const { return input(0); }  // Value whose bits are edited.
  OpIndex word32() const { return input(1); }   // Replacement 32-bit pattern.

  Float64InsertWord32Op(OpIndex float64, OpIndex word32, Kind kind)
      : Base(float64, word32), kind(kind) {}
  auto options() const { return std::tuple{kind}; }
};
std::ostream& operator<<(std::ostream& os, Float64InsertWord32Op::Kind kind);
struct TaggedBitcastOp : FixedArityOperationT<1, TaggedBitcastOp> {
MachineRepresentation from;
MachineRepresentation to;
@ -930,11 +1040,11 @@ struct ConstantOp : FixedArityOperationT<0, ConstantOp> {
};
// Load loaded_rep from: base + offset.
// For Kind::kOnHeap: base - kHeapObjectTag + offset
// For Kind::kOnHeap, `base` has to be the object start.
// For Kind::tagged_base: subtract kHeapObjectTag,
// `base` has to be the object start.
// For (u)int8/16, the value will be sign- or zero-extended to Word32.
struct LoadOp : FixedArityOperationT<1, LoadOp> {
enum class Kind : uint8_t { kOnHeap, kRaw };
enum class Kind { kTaggedBase, kRawAligned, kRawUnaligned };
Kind kind;
MachineType loaded_rep;
int32_t offset;
@ -949,9 +1059,19 @@ struct LoadOp : FixedArityOperationT<1, LoadOp> {
auto options() const { return std::tuple{kind, loaded_rep, offset}; }
};
// Returns whether a load/store kind guarantees an aligned access: both
// kTaggedBase and kRawAligned do; only kRawUnaligned does not.
inline bool IsAlignedAccess(LoadOp::Kind kind) {
  return kind != LoadOp::Kind::kRawUnaligned;
}
// Load `loaded_rep` from: base + offset + index * 2^element_size_log2.
// For Kind::kOnHeap: subtract kHeapObjectTag,
// `base` has to be the object start.
// For Kind::tagged_base: subtract kHeapObjectTag,
// `base` has to be the object start.
// For (u)int8/16, the value will be sign- or zero-extended to Word32.
struct IndexedLoadOp : FixedArityOperationT<2, IndexedLoadOp> {
using Kind = LoadOp::Kind;
@ -979,10 +1099,10 @@ struct IndexedLoadOp : FixedArityOperationT<2, IndexedLoadOp> {
};
// Store `value` to: base + offset.
// For Kind::kOnHeap: base - kHeapObjectTag + offset
// For Kind::kOnHeap, `base` has to be the object start.
// For Kind::tagged_base: subtract kHeapObjectTag,
// `base` has to be the object start.
struct StoreOp : FixedArityOperationT<2, StoreOp> {
enum class Kind : uint8_t { kOnHeap, kRaw };
using Kind = LoadOp::Kind;
Kind kind;
MachineRepresentation stored_rep;
WriteBarrierKind write_barrier;
@ -1008,8 +1128,8 @@ struct StoreOp : FixedArityOperationT<2, StoreOp> {
};
// Store `value` to: base + offset + index * 2^element_size_log2.
// For Kind::kOnHeap: subtract kHeapObjectTag,
// `base` has to be the object start.
// For Kind::tagged_base: subtract kHeapObjectTag,
// `base` has to be the object start.
struct IndexedStoreOp : FixedArityOperationT<3, IndexedStoreOp> {
using Kind = StoreOp::Kind;
Kind kind;
@ -1041,6 +1161,19 @@ struct IndexedStoreOp : FixedArityOperationT<3, IndexedStoreOp> {
}
};
// Retain a HeapObject to prevent it from being garbage collected too early.
struct RetainOp : FixedArityOperationT<1, RetainOp> {
  // The value kept alive by this operation.
  OpIndex retained() const { return input(0); }

  // Retain doesn't actually write, it just keeps a value alive. However, since
  // this must not be reordered with operations reading from the heap, we mark
  // it as writing to prevent such reorderings.
  static constexpr OpProperties properties = OpProperties::Writing();

  explicit RetainOp(OpIndex retained) : Base(retained) {}
  // No options: two RetainOps differ only in their input.
  auto options() const { return std::tuple{}; }
};
struct StackPointerGreaterThanOp
: FixedArityOperationT<1, StackPointerGreaterThanOp> {
StackCheckKind kind;
@ -1054,13 +1187,32 @@ struct StackPointerGreaterThanOp
auto options() const { return std::tuple{kind}; }
};
struct LoadStackCheckOffsetOp
: FixedArityOperationT<0, LoadStackCheckOffsetOp> {
// Allocate a piece of memory in the current stack frame. Every operation
// in the IR is a separate stack slot, but repeated execution in a loop
// produces the same stack slot.
struct StackSlotOp : FixedArityOperationT<0, StackSlotOp> {
  int size;       // Slot size in bytes.
  int alignment;  // Required alignment in bytes.
  // Marked as writing, presumably so distinct slots are never deduplicated or
  // reordered across memory operations -- TODO confirm against scheduler use.
  static constexpr OpProperties properties = OpProperties::Writing();
  StackSlotOp(int size, int alignment) : size(size), alignment(alignment) {}
  auto options() const { return std::tuple{size, alignment}; }
};
// Values that are constant for the current stack frame/invocation.
// Therefore, they behave like constants, even though they are different for
// every invocation.
// (Fixed: removed the leftover `LoadStackCheckOffsetOp() : Base() {}`
// constructor and the duplicate parameterless `options()` from the replaced
// LoadStackCheckOffsetOp operation, which do not belong in this struct.)
struct FrameConstantOp : FixedArityOperationT<0, FrameConstantOp> {
  enum class Kind { kStackCheckOffset, kFramePointer, kParentFramePointer };
  Kind kind;
  static constexpr OpProperties properties = OpProperties::Pure();

  explicit FrameConstantOp(Kind kind) : Base(), kind(kind) {}
  auto options() const { return std::tuple{kind}; }
};
std::ostream& operator<<(std::ostream& os, FrameConstantOp::Kind kind);
struct FrameStateOp : OperationT<FrameStateOp> {
bool inlined;
@ -1078,8 +1230,8 @@ struct FrameStateOp : OperationT<FrameStateOp> {
return result;
}
explicit FrameStateOp(base::Vector<const OpIndex> inputs, bool inlined,
const FrameStateData* data)
FrameStateOp(base::Vector<const OpIndex> inputs, bool inlined,
const FrameStateData* data)
: Base(inputs), inlined(inlined), data(data) {}
void PrintOptions(std::ostream& os) const;
auto options() const { return std::tuple{inlined, data}; }
@ -1140,6 +1292,15 @@ struct ParameterOp : FixedArityOperationT<0, ParameterOp> {
void PrintOptions(std::ostream& os) const;
};
// Reads a value supplied by on-stack replacement (OSR) when entering
// optimized code mid-execution.
struct OsrValueOp : FixedArityOperationT<0, OsrValueOp> {
  // Index of the OSR value; exact meaning (parameter vs. stack slot) is
  // defined by the OSR frame layout, not visible here -- TODO confirm.
  int32_t index;
  static constexpr OpProperties properties = OpProperties::Pure();

  explicit OsrValueOp(int32_t index) : Base(), index(index) {}
  auto options() const { return std::tuple{index}; }
};
struct CallOp : OperationT<CallOp> {
const CallDescriptor* descriptor;
@ -1175,15 +1336,26 @@ struct UnreachableOp : FixedArityOperationT<0, UnreachableOp> {
};
// Returns from the current function. Input 0 is the (dynamic) pop count,
// the remaining inputs are the returned values.
// (Fixed: removed the stale `int32_t pop_count;` member and the old
// constructor/`return_values()`/`options()` from the previous static-pop-count
// encoding, which conflicted with the new OpIndex-based encoding.)
struct ReturnOp : OperationT<ReturnOp> {
  static constexpr OpProperties properties = OpProperties::BlockTerminator();

  // Number of additional stack slots to be removed.
  OpIndex pop_count() const { return input(0); }

  // The values returned to the caller (inputs after the pop count).
  base::Vector<const OpIndex> return_values() const {
    return inputs().SubVector(1, input_count);
  }

  ReturnOp(OpIndex pop_count, base::Vector<const OpIndex> return_values)
      : Base(1 + return_values.size()) {
    base::Vector<OpIndex> inputs = this->inputs();
    inputs[0] = pop_count;
    inputs.SubVector(1, inputs.size()).OverwriteWith(return_values);
  }
  static ReturnOp& New(Graph* graph, OpIndex pop_count,
                       base::Vector<const OpIndex> return_values) {
    return Base::New(graph, 1 + return_values.size(), pop_count, return_values);
  }
  // All state lives in the inputs; there are no extra options.
  auto options() const { return std::tuple{}; }
};
struct GotoOp : FixedArityOperationT<0, GotoOp> {
@ -1208,6 +1380,20 @@ struct BranchOp : FixedArityOperationT<1, BranchOp> {
auto options() const { return std::tuple{if_true, if_false}; }
};
// Block terminator modeling the control flow of a call that may throw:
// control continues in `if_success` when the call completes normally and in
// `if_exception` when it throws. The single input is the call itself.
struct CatchExceptionOp : FixedArityOperationT<1, CatchExceptionOp> {
Block* if_success;
Block* if_exception;
static constexpr OpProperties properties = OpProperties::BlockTerminator();
// The call whose exceptional continuation is caught here.
OpIndex call() const { return input(0); }
explicit CatchExceptionOp(OpIndex call, Block* if_success,
Block* if_exception)
: Base(call), if_success(if_success), if_exception(if_exception) {}
auto options() const { return std::tuple{if_success, if_exception}; }
};
struct SwitchOp : FixedArityOperationT<1, SwitchOp> {
struct Case {
int32_t value;
@ -1238,15 +1424,19 @@ struct SwitchOp : FixedArityOperationT<1, SwitchOp> {
// For operations that produce multiple results, we use `ProjectionOp` to
// distinguish them.
struct ProjectionOp : FixedArityOperationT<1, ProjectionOp> {
enum class Kind { kResult, kOverflowBit };
enum class Kind : uint8_t { kExceptionValue, kTuple };
Kind kind;
uint16_t index;
static constexpr OpProperties properties = OpProperties::Pure();
OpIndex input() const { return Base::input(0); }
ProjectionOp(OpIndex input, Kind kind) : Base(input), kind(kind) {}
auto options() const { return std::tuple{kind}; }
ProjectionOp(OpIndex input, Kind kind, uint16_t index)
: Base(input), kind(kind), index(index) {
DCHECK_IMPLIES(kind != Kind::kTuple, index == 0);
}
auto options() const { return std::tuple{kind, index}; }
};
std::ostream& operator<<(std::ostream& os, ProjectionOp::Kind kind);

View File

@ -148,6 +148,10 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
for (auto it = op_range.begin(); it != op_range.end(); ++it) {
const Operation& op = *it;
OpIndex index = it.Index();
if (V8_UNLIKELY(!input_graph.source_positions().empty())) {
assembler.SetCurrentSourcePosition(
input_graph.source_positions()[index]);
}
OpIndex first_output_index = assembler.graph().next_operation_index();
USE(first_output_index);
if constexpr (trace_reduction) TraceReductionStart(index);
@ -235,6 +239,12 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
Block* if_false = MapToNewGraph(op.if_false->index());
return assembler.Branch(MapToNewGraph(op.condition()), if_true, if_false);
}
// Copies a CatchException terminator into the output graph, remapping the
// call input and both successor blocks to their new-graph equivalents.
OpIndex ReduceCatchException(const CatchExceptionOp& op) {
Block* if_success = MapToNewGraph(op.if_success->index());
Block* if_exception = MapToNewGraph(op.if_exception->index());
return assembler.CatchException(MapToNewGraph(op.call()), if_success,
if_exception);
}
OpIndex ReduceSwitch(const SwitchOp& op) {
base::SmallVector<SwitchOp::Case, 16> cases;
for (SwitchOp::Case c : op.cases) {
@ -277,13 +287,17 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
return assembler.Call(callee, base::VectorOf(arguments), op.descriptor);
}
OpIndex ReduceReturn(const ReturnOp& op) {
auto inputs = MapToNewGraph<4>(op.inputs());
return assembler.Return(base::VectorOf(inputs), op.pop_count);
auto return_values = MapToNewGraph<4>(op.return_values());
return assembler.Return(MapToNewGraph(op.pop_count()),
base::VectorOf(return_values));
}
OpIndex ReduceOverflowCheckedBinop(const OverflowCheckedBinopOp& op) {
return assembler.OverflowCheckedBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceIntegerUnary(const IntegerUnaryOp& op) {
return assembler.IntegerUnary(MapToNewGraph(op.input()), op.kind, op.rep);
}
OpIndex ReduceFloatUnary(const FloatUnaryOp& op) {
return assembler.FloatUnary(MapToNewGraph(op.input()), op.kind, op.rep);
}
@ -302,6 +316,10 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
OpIndex ReduceChange(const ChangeOp& op) {
return assembler.Change(MapToNewGraph(op.input()), op.kind, op.from, op.to);
}
// Copies a Float64InsertWord32 operation, remapping both value inputs; the
// kind (high/low half) is carried over unchanged.
OpIndex ReduceFloat64InsertWord32(const Float64InsertWord32Op& op) {
return assembler.Float64InsertWord32(MapToNewGraph(op.float64()),
MapToNewGraph(op.word32()), op.kind);
}
OpIndex ReduceTaggedBitcast(const TaggedBitcastOp& op) {
return assembler.TaggedBitcast(MapToNewGraph(op.input()), op.from, op.to);
}
@ -327,15 +345,24 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier,
op.offset, op.element_size_log2);
}
OpIndex ReduceRetain(const RetainOp& op) {
return assembler.Retain(MapToNewGraph(op.retained()));
}
OpIndex ReduceParameter(const ParameterOp& op) {
return assembler.Parameter(op.parameter_index, op.debug_name);
}
// Copies an OsrValue operation; it has no inputs, only its index option.
OpIndex ReduceOsrValue(const OsrValueOp& op) {
return assembler.OsrValue(op.index);
}
OpIndex ReduceStackPointerGreaterThan(const StackPointerGreaterThanOp& op) {
return assembler.StackPointerGreaterThan(MapToNewGraph(op.stack_limit()),
op.kind);
}
OpIndex ReduceLoadStackCheckOffset(const LoadStackCheckOffsetOp& op) {
return assembler.LoadStackCheckOffset();
OpIndex ReduceStackSlot(const StackSlotOp& op) {
return assembler.StackSlot(op.size, op.alignment);
}
OpIndex ReduceFrameConstant(const FrameConstantOp& op) {
return assembler.FrameConstant(op.kind);
}
OpIndex ReduceCheckLazyDeopt(const CheckLazyDeoptOp& op) {
return assembler.CheckLazyDeopt(MapToNewGraph(op.call()),
@ -350,7 +377,7 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
op.parameters);
}
OpIndex ReduceProjection(const ProjectionOp& op) {
return assembler.Projection(MapToNewGraph(op.input()), op.kind);
return assembler.Projection(MapToNewGraph(op.input()), op.kind, op.index);
}
OpIndex ReduceBinop(const BinopOp& op) {
return assembler.Binop(MapToNewGraph(op.left()), MapToNewGraph(op.right()),

View File

@ -4,13 +4,17 @@
#include "src/compiler/turboshaft/recreate-schedule.h"
#include "src/base/logging.h"
#include "src/base/safe_conversions.h"
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/base/vector.h"
#include "src/codegen/machine-type.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
@ -20,6 +24,8 @@
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/write-barrier-kind.h"
#include "src/utils/utils.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
@ -31,6 +37,7 @@ struct ScheduleBuilder {
CallDescriptor* call_descriptor;
Zone* graph_zone;
Zone* phase_zone;
SourcePositionTable* source_positions;
const size_t node_count_estimate =
static_cast<size_t>(1.1 * input_graph.op_id_count());
@ -47,6 +54,7 @@ struct ScheduleBuilder {
compiler::BasicBlock* current_block = schedule->start();
const Block* current_input_block = nullptr;
ZoneUnorderedMap<int, Node*> parameters{phase_zone};
ZoneUnorderedMap<int, Node*> osr_values{phase_zone};
std::vector<BasicBlock*> blocks = {};
std::vector<Node*> nodes{input_graph.op_id_count()};
std::vector<std::pair<Node*, OpIndex>> loop_phis = {};
@ -104,17 +112,11 @@ Node* ScheduleBuilder::AddNode(const Operator* op,
RecreateScheduleResult ScheduleBuilder::Run() {
DCHECK_GE(input_graph.block_count(), 1);
// The schedule needs to contain a dummy end block because the register
// allocator expects this. This block is not actually reachable with control
// flow. It is added here because the Turboshaft graph doesn't contain such a
// block.
blocks.reserve(input_graph.block_count() + 1);
blocks.reserve(input_graph.block_count());
blocks.push_back(current_block);
for (size_t i = 1; i < input_graph.block_count(); ++i) {
blocks.push_back(schedule->NewBasicBlock());
}
blocks.push_back(schedule->end());
DCHECK_EQ(blocks.size(), input_graph.block_count() + 1);
// The value output count of the start node does not actually matter.
tf_graph->SetStart(tf_graph->NewNode(common.Start(0)));
tf_graph->SetEnd(tf_graph->NewNode(common.End(0)));
@ -136,7 +138,6 @@ RecreateScheduleResult ScheduleBuilder::Run() {
DCHECK(schedule->rpo_order()->empty());
Scheduler::ComputeSpecialRPO(phase_zone, schedule);
Scheduler::GenerateDominatorTree(schedule);
DCHECK_EQ(schedule->rpo_order()->size(), blocks.size());
return {tf_graph, schedule};
}
@ -150,7 +151,13 @@ void ScheduleBuilder::ProcessOperation(const Operation& op) {
TURBOSHAFT_OPERATION_LIST(SWITCH_CASE)
#undef SWITCH_CASE
}
nodes[input_graph.Index(op).id()] = node;
OpIndex index = input_graph.Index(op);
DCHECK_LT(index.id(), nodes.size());
nodes[index.id()] = node;
if (source_positions && node) {
source_positions->SetSourcePosition(node,
input_graph.source_positions()[index]);
}
}
Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
@ -167,6 +174,24 @@ Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
case BinopOp::Kind::kMul:
o = machine.Int32Mul();
break;
case BinopOp::Kind::kSignedMulOverflownBits:
o = machine.Int32MulHigh();
break;
case BinopOp::Kind::kUnsignedMulOverflownBits:
o = machine.Uint32MulHigh();
break;
case BinopOp::Kind::kSignedDiv:
o = machine.Int32Div();
break;
case BinopOp::Kind::kUnsignedDiv:
o = machine.Uint32Div();
break;
case BinopOp::Kind::kSignedMod:
o = machine.Int32Mod();
break;
case BinopOp::Kind::kUnsignedMod:
o = machine.Uint32Mod();
break;
case BinopOp::Kind::kBitwiseAnd:
o = machine.Word32And();
break;
@ -176,6 +201,11 @@ Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
case BinopOp::Kind::kBitwiseXor:
o = machine.Word32Xor();
break;
case BinopOp::Kind::kMin:
case BinopOp::Kind::kMax:
case BinopOp::Kind::kPower:
case BinopOp::Kind::kAtan2:
UNREACHABLE();
}
break;
case MachineRepresentation::kWord64:
@ -189,6 +219,18 @@ Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
case BinopOp::Kind::kMul:
o = machine.Int64Mul();
break;
case BinopOp::Kind::kSignedDiv:
o = machine.Int64Div();
break;
case BinopOp::Kind::kUnsignedDiv:
o = machine.Uint64Div();
break;
case BinopOp::Kind::kSignedMod:
o = machine.Int64Mod();
break;
case BinopOp::Kind::kUnsignedMod:
o = machine.Uint64Mod();
break;
case BinopOp::Kind::kBitwiseAnd:
o = machine.Word64And();
break;
@ -198,6 +240,13 @@ Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
case BinopOp::Kind::kBitwiseXor:
o = machine.Word64Xor();
break;
case BinopOp::Kind::kMin:
case BinopOp::Kind::kMax:
case BinopOp::Kind::kSignedMulOverflownBits:
case BinopOp::Kind::kUnsignedMulOverflownBits:
case BinopOp::Kind::kPower:
case BinopOp::Kind::kAtan2:
UNREACHABLE();
}
break;
case MachineRepresentation::kFloat32:
@ -211,9 +260,25 @@ Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
case BinopOp::Kind::kMul:
o = machine.Float32Mul();
break;
case BinopOp::Kind::kSignedDiv:
o = machine.Float32Div();
break;
case BinopOp::Kind::kMin:
o = machine.Float32Min();
break;
case BinopOp::Kind::kMax:
o = machine.Float32Max();
break;
case BinopOp::Kind::kSignedMulOverflownBits:
case BinopOp::Kind::kUnsignedMulOverflownBits:
case BinopOp::Kind::kUnsignedDiv:
case BinopOp::Kind::kSignedMod:
case BinopOp::Kind::kUnsignedMod:
case BinopOp::Kind::kBitwiseAnd:
case BinopOp::Kind::kBitwiseOr:
case BinopOp::Kind::kBitwiseXor:
case BinopOp::Kind::kPower:
case BinopOp::Kind::kAtan2:
UNREACHABLE();
}
break;
@ -228,9 +293,31 @@ Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
case BinopOp::Kind::kMul:
o = machine.Float64Mul();
break;
case BinopOp::Kind::kSignedDiv:
o = machine.Float64Div();
break;
case BinopOp::Kind::kSignedMod:
o = machine.Float64Mod();
break;
case BinopOp::Kind::kMin:
o = machine.Float64Min();
break;
case BinopOp::Kind::kMax:
o = machine.Float64Max();
break;
case BinopOp::Kind::kPower:
o = machine.Float64Pow();
break;
case BinopOp::Kind::kAtan2:
o = machine.Float64Atan2();
break;
case BinopOp::Kind::kSignedMulOverflownBits:
case BinopOp::Kind::kUnsignedMulOverflownBits:
case BinopOp::Kind::kBitwiseAnd:
case BinopOp::Kind::kBitwiseOr:
case BinopOp::Kind::kBitwiseXor:
case BinopOp::Kind::kUnsignedDiv:
case BinopOp::Kind::kUnsignedMod:
UNREACHABLE();
}
break;
@ -272,77 +359,136 @@ Node* ScheduleBuilder::ProcessOperation(const OverflowCheckedBinopOp& op) {
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
// Lowers an integer unary operation to the corresponding 32- or 64-bit
// machine operator.
Node* ScheduleBuilder::ProcessOperation(const IntegerUnaryOp& op) {
  DCHECK(op.rep == MachineRepresentation::kWord32 ||
         op.rep == MachineRepresentation::kWord64);
  const bool is_64bit = op.rep == MachineRepresentation::kWord64;
  const Operator* selected = nullptr;
  if (op.kind == IntegerUnaryOp::Kind::kReverseBytes) {
    selected = is_64bit ? machine.Word64ReverseBytes()
                        : machine.Word32ReverseBytes();
  } else {
    DCHECK_EQ(op.kind, IntegerUnaryOp::Kind::kCountLeadingZeros);
    selected = is_64bit ? machine.Word64Clz() : machine.Word32Clz();
  }
  return AddNode(selected, {GetNode(op.input())});
}
Node* ScheduleBuilder::ProcessOperation(const FloatUnaryOp& op) {
DCHECK(op.rep == MachineRepresentation::kFloat32 ||
op.rep == MachineRepresentation::kFloat64);
bool float64 = op.rep == MachineRepresentation::kFloat64;
const Operator* o;
switch (op.kind) {
case FloatUnaryOp::Kind::kAbs:
switch (op.rep) {
case MachineRepresentation::kFloat32:
o = machine.Float32Abs();
break;
case MachineRepresentation::kFloat64:
o = machine.Float64Abs();
break;
default:
UNREACHABLE();
}
o = float64 ? machine.Float64Abs() : machine.Float32Abs();
break;
case FloatUnaryOp::Kind::kNegate:
switch (op.rep) {
case MachineRepresentation::kFloat32:
o = machine.Float32Neg();
break;
case MachineRepresentation::kFloat64:
o = machine.Float64Neg();
break;
default:
UNREACHABLE();
}
o = float64 ? machine.Float64Neg() : machine.Float32Neg();
break;
case FloatUnaryOp::Kind::kRoundDown:
o = float64 ? machine.Float64RoundDown().op()
: machine.Float32RoundDown().op();
break;
case FloatUnaryOp::Kind::kRoundUp:
o = float64 ? machine.Float64RoundUp().op()
: machine.Float32RoundUp().op();
break;
case FloatUnaryOp::Kind::kRoundToZero:
o = float64 ? machine.Float64RoundTruncate().op()
: machine.Float32RoundTruncate().op();
break;
case FloatUnaryOp::Kind::kRoundTiesEven:
o = float64 ? machine.Float64RoundTiesEven().op()
: machine.Float32RoundTiesEven().op();
break;
case FloatUnaryOp::Kind::kSqrt:
o = float64 ? machine.Float64Sqrt() : machine.Float32Sqrt();
break;
case FloatUnaryOp::Kind::kSilenceNaN:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64SilenceNaN();
break;
case FloatUnaryOp::Kind::kLog:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Log();
break;
case FloatUnaryOp::Kind::kExp:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Exp();
break;
case FloatUnaryOp::Kind::kExpm1:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Expm1();
break;
case FloatUnaryOp::Kind::kSin:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Sin();
break;
case FloatUnaryOp::Kind::kCos:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Cos();
break;
case FloatUnaryOp::Kind::kAsin:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Asin();
break;
case FloatUnaryOp::Kind::kAcos:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Acos();
break;
case FloatUnaryOp::Kind::kSinh:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Sinh();
break;
case FloatUnaryOp::Kind::kCosh:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Cosh();
break;
case FloatUnaryOp::Kind::kAsinh:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Asinh();
break;
case FloatUnaryOp::Kind::kAcosh:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Acosh();
break;
case FloatUnaryOp::Kind::kTan:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Tan();
break;
case FloatUnaryOp::Kind::kTanh:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64Tanh();
break;
}
return AddNode(o, {GetNode(op.input())});
}
Node* ScheduleBuilder::ProcessOperation(const ShiftOp& op) {
DCHECK(op.rep == MachineRepresentation::kWord32 ||
op.rep == MachineRepresentation::kWord64);
bool word64 = op.rep == MachineRepresentation::kWord64;
const Operator* o;
switch (op.rep) {
case MachineRepresentation::kWord32:
switch (op.kind) {
case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
o = machine.Word32SarShiftOutZeros();
break;
case ShiftOp::Kind::kShiftRightArithmetic:
o = machine.Word32Sar();
break;
case ShiftOp::Kind::kShiftRightLogical:
o = machine.Word32Shr();
break;
case ShiftOp::Kind::kShiftLeft:
o = machine.Word32Shl();
break;
}
switch (op.kind) {
case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
o = word64 ? machine.Word64SarShiftOutZeros()
: machine.Word32SarShiftOutZeros();
break;
case MachineRepresentation::kWord64:
switch (op.kind) {
case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
o = machine.Word64SarShiftOutZeros();
break;
case ShiftOp::Kind::kShiftRightArithmetic:
o = machine.Word64Sar();
break;
case ShiftOp::Kind::kShiftRightLogical:
o = machine.Word64Shr();
break;
case ShiftOp::Kind::kShiftLeft:
o = machine.Word64Shl();
break;
}
case ShiftOp::Kind::kShiftRightArithmetic:
o = word64 ? machine.Word64Sar() : machine.Word32Sar();
break;
case ShiftOp::Kind::kShiftRightLogical:
o = word64 ? machine.Word64Shr() : machine.Word32Shr();
break;
case ShiftOp::Kind::kShiftLeft:
o = word64 ? machine.Word64Shl() : machine.Word32Shl();
break;
case ShiftOp::Kind::kRotateLeft:
o = word64 ? machine.Word64Rol().op() : machine.Word32Rol().op();
break;
case ShiftOp::Kind::kRotateRight:
o = word64 ? machine.Word64Ror() : machine.Word32Ror();
break;
default:
UNREACHABLE();
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
@ -444,6 +590,17 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
UNIMPLEMENTED();
}
break;
case Kind::kFloatConversion:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kFloat32) {
o = machine.TruncateFloat64ToFloat32();
} else if (op.from == MachineRepresentation::kFloat32 &&
op.to == MachineRepresentation::kFloat64) {
o = machine.ChangeFloat32ToFloat64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedFloatTruncate:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
@ -504,6 +661,18 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kWord64) {
o = machine.BitcastWord32ToWord64();
} else if (op.from == MachineRepresentation::kFloat32 &&
op.to == MachineRepresentation::kWord32) {
o = machine.BitcastFloat32ToInt32();
} else if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kFloat32) {
o = machine.BitcastInt32ToFloat32();
} else if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.BitcastFloat64ToInt64();
} else if (op.from == MachineRepresentation::kWord64 &&
op.to == MachineRepresentation::kFloat64) {
o = machine.BitcastInt64ToFloat64();
} else {
UNIMPLEMENTED();
}
@ -528,9 +697,8 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.ChangeFloat64ToInt64();
}
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
} else if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.ChangeFloat64ToInt32();
} else {
UNIMPLEMENTED();
@ -540,9 +708,8 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.ChangeFloat64ToUint64();
}
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
} else if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.ChangeFloat64ToUint32();
} else {
UNIMPLEMENTED();
@ -551,6 +718,16 @@ Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
}
return AddNode(o, {GetNode(op.input())});
}
// Lowers Float64InsertWord32 by selecting between the machine operators that
// replace the high or the low 32-bit half of a float64.
Node* ScheduleBuilder::ProcessOperation(const Float64InsertWord32Op& op) {
  const Operator* o = op.kind == Float64InsertWord32Op::Kind::kHighHalf
                          ? machine.Float64InsertHighWord32()
                          : machine.Float64InsertLowWord32();
  return AddNode(o, {GetNode(op.float64()), GetNode(op.word32())});
}
Node* ScheduleBuilder::ProcessOperation(const TaggedBitcastOp& op) {
const Operator* o;
if (op.from == MachineRepresentation::kTagged &&
@ -595,16 +772,19 @@ Node* ScheduleBuilder::ProcessOperation(const ConstantOp& op) {
}
Node* ScheduleBuilder::ProcessOperation(const LoadOp& op) {
intptr_t offset = op.offset;
if (op.kind == LoadOp::Kind::kOnHeap) {
if (op.kind == LoadOp::Kind::kTaggedBase) {
CHECK_GE(offset, std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
Node* base = GetNode(op.base());
return AddNode(machine.Load(op.loaded_rep), {base, IntPtrConstant(offset)});
return AddNode(IsAlignedAccess(op.kind)
? machine.Load(op.loaded_rep)
: machine.UnalignedLoad(op.loaded_rep),
{base, IntPtrConstant(offset)});
}
Node* ScheduleBuilder::ProcessOperation(const IndexedLoadOp& op) {
intptr_t offset = op.offset;
if (op.kind == IndexedLoadOp::Kind::kOnHeap) {
if (op.kind == LoadOp::Kind::kTaggedBase) {
CHECK_GE(offset, std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
@ -616,23 +796,31 @@ Node* ScheduleBuilder::ProcessOperation(const IndexedLoadOp& op) {
if (offset != 0) {
index = IntPtrAdd(index, IntPtrConstant(offset));
}
return AddNode(machine.Load(op.loaded_rep), {base, index});
return AddNode(IsAlignedAccess(op.kind)
? machine.Load(op.loaded_rep)
: machine.UnalignedLoad(op.loaded_rep),
{base, index});
}
Node* ScheduleBuilder::ProcessOperation(const StoreOp& op) {
intptr_t offset = op.offset;
if (op.kind == StoreOp::Kind::kOnHeap) {
if (op.kind == StoreOp::Kind::kTaggedBase) {
CHECK(offset >= std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
Node* base = GetNode(op.base());
Node* value = GetNode(op.value());
return AddNode(
machine.Store(StoreRepresentation(op.stored_rep, op.write_barrier)),
{base, IntPtrConstant(offset), value});
const Operator* o;
if (IsAlignedAccess(op.kind)) {
o = machine.Store(StoreRepresentation(op.stored_rep, op.write_barrier));
} else {
DCHECK_EQ(op.write_barrier, WriteBarrierKind::kNoWriteBarrier);
o = machine.UnalignedStore(op.stored_rep);
}
return AddNode(o, {base, IntPtrConstant(offset), value});
}
Node* ScheduleBuilder::ProcessOperation(const IndexedStoreOp& op) {
intptr_t offset = op.offset;
if (op.kind == IndexedStoreOp::Kind::kOnHeap) {
if (op.kind == IndexedStoreOp::Kind::kTaggedBase) {
CHECK(offset >= std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
@ -645,9 +833,17 @@ Node* ScheduleBuilder::ProcessOperation(const IndexedStoreOp& op) {
if (offset != 0) {
index = IntPtrAdd(index, IntPtrConstant(offset));
}
return AddNode(
machine.Store(StoreRepresentation(op.stored_rep, op.write_barrier)),
{base, index, value});
const Operator* o;
if (IsAlignedAccess(op.kind)) {
o = machine.Store(StoreRepresentation(op.stored_rep, op.write_barrier));
} else {
DCHECK_EQ(op.write_barrier, WriteBarrierKind::kNoWriteBarrier);
o = machine.UnalignedStore(op.stored_rep);
}
return AddNode(o, {base, index, value});
}
// Translates a Retain operation into a common.Retain node over the retained
// value.
Node* ScheduleBuilder::ProcessOperation(const RetainOp& op) {
return AddNode(common.Retain(), {GetNode(op.retained())});
}
Node* ScheduleBuilder::ProcessOperation(const ParameterOp& op) {
// Parameters need to be cached because the register allocator assumes that
@ -662,6 +858,17 @@ Node* ScheduleBuilder::ProcessOperation(const ParameterOp& op) {
parameters[op.parameter_index] = parameter;
return parameter;
}
// OSR values behave like parameters, so they also need to be cached: each
// index must map to exactly one node in the schedule.
Node* ScheduleBuilder::ProcessOperation(const OsrValueOp& op) {
  // Single `find` instead of `count()` + `operator[]`: avoids a second hash
  // lookup on the cache-hit path.
  auto it = osr_values.find(op.index);
  if (it != osr_values.end()) return it->second;
  Node* osr_value = MakeNode(common.OsrValue(static_cast<int>(op.index)),
                             {tf_graph->start()});
  // OSR values are scheduled in the start block, like parameters.
  schedule->AddNode(schedule->start(), osr_value);
  osr_values[op.index] = osr_value;
  return osr_value;
}
Node* ScheduleBuilder::ProcessOperation(const GotoOp& op) {
schedule->AddGoto(current_block, blocks[op.destination->index().id()]);
current_block = nullptr;
@ -671,8 +878,18 @@ Node* ScheduleBuilder::ProcessOperation(const StackPointerGreaterThanOp& op) {
return AddNode(machine.StackPointerGreaterThan(op.kind),
{GetNode(op.stack_limit())});
}
Node* ScheduleBuilder::ProcessOperation(const LoadStackCheckOffsetOp& op) {
return AddNode(machine.LoadStackCheckOffset(), {});
Node* ScheduleBuilder::ProcessOperation(const StackSlotOp& op) {
return AddNode(machine.StackSlot(op.size, op.alignment), {});
}
// Lowers a FrameConstant operation to the machine operator that loads the
// corresponding frame-related constant. The switch is exhaustive over
// FrameConstantOp::Kind, so no fallthrough return is needed.
Node* ScheduleBuilder::ProcessOperation(const FrameConstantOp& op) {
switch (op.kind) {
case FrameConstantOp::Kind::kStackCheckOffset:
return AddNode(machine.LoadStackCheckOffset(), {});
case FrameConstantOp::Kind::kFramePointer:
return AddNode(machine.LoadFramePointer(), {});
case FrameConstantOp::Kind::kParentFramePointer:
return AddNode(machine.LoadParentFramePointer(), {});
}
}
Node* ScheduleBuilder::ProcessOperation(const CheckLazyDeoptOp& op) {
Node* call = GetNode(op.call());
@ -718,10 +935,20 @@ Node* ScheduleBuilder::ProcessOperation(const PhiOp& op) {
}
Node* ScheduleBuilder::ProcessOperation(const ProjectionOp& op) {
switch (op.kind) {
case ProjectionOp::Kind::kOverflowBit:
return AddNode(common.Projection(1), {GetNode(op.input())});
case ProjectionOp::Kind::kResult:
return AddNode(common.Projection(0), {GetNode(op.input())});
case ProjectionOp::Kind::kTuple:
return AddNode(common.Projection(op.index), {GetNode(op.input())});
case ProjectionOp::Kind::kExceptionValue: {
// The `IfException` projection was created when processing
// `CatchExceptionOp`, so we just need to find it here.
Node* call = GetNode(op.input());
DCHECK_EQ(call->opcode(), IrOpcode::kCall);
for (Node* use : call->uses()) {
if (use->opcode() == IrOpcode::kIfException) {
return use;
}
}
UNREACHABLE();
}
}
}
@ -750,13 +977,23 @@ std::pair<Node*, MachineType> ScheduleBuilder::BuildDeoptInput(
}
return {AddNode(common.TypedObjectState(obj_id, &field_types),
base::VectorOf(fields)),
MachineType::TaggedPointer()};
MachineType::AnyTagged()};
}
case Instr::kDematerializedObjectReference: {
uint32_t obj_id;
it->ConsumeDematerializedObjectReference(&obj_id);
return {AddNode(common.ObjectId(obj_id), {}),
MachineType::TaggedPointer()};
return {AddNode(common.ObjectId(obj_id), {}), MachineType::AnyTagged()};
}
case Instr::kArgumentsElements: {
CreateArgumentsType type;
it->ConsumeArgumentsElements(&type);
return {AddNode(common.ArgumentsElementsState(type), {}),
MachineType::AnyTagged()};
}
case Instr::kArgumentsLength: {
it->ConsumeArgumentsLength();
return {AddNode(common.ArgumentsLengthState(), {}),
MachineType::AnyTagged()};
}
case Instr::kUnusedRegister:
UNREACHABLE();
@ -850,8 +1087,7 @@ Node* ScheduleBuilder::ProcessOperation(const UnreachableOp& op) {
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const ReturnOp& op) {
Node* pop_count = AddNode(common.Int32Constant(op.pop_count), {});
base::SmallVector<Node*, 8> inputs = {pop_count};
base::SmallVector<Node*, 8> inputs = {GetNode(op.pop_count())};
for (OpIndex i : op.return_values()) {
inputs.push_back(GetNode(i));
}
@ -868,11 +1104,24 @@ Node* ScheduleBuilder::ProcessOperation(const BranchOp& op) {
BasicBlock* true_block = GetBlock(*op.if_true);
BasicBlock* false_block = GetBlock(*op.if_false);
schedule->AddBranch(current_block, branch, true_block, false_block);
true_block->AddNode(MakeNode(common.IfTrue(), {branch}));
false_block->AddNode(MakeNode(common.IfFalse(), {branch}));
schedule->AddNode(true_block, MakeNode(common.IfTrue(), {branch}));
schedule->AddNode(false_block, MakeNode(common.IfFalse(), {branch}));
current_block = nullptr;
return nullptr;
}
// Schedules the call as a block terminator with exceptional control flow:
// `success_block` receives an IfSuccess projection of the call and
// `exception_block` an IfException projection.
Node* ScheduleBuilder::ProcessOperation(const CatchExceptionOp& op) {
Node* call = GetNode(op.call());
BasicBlock* success_block = GetBlock(*op.if_success);
BasicBlock* exception_block = GetBlock(*op.if_exception);
schedule->AddCall(current_block, call, success_block, exception_block);
Node* if_success = MakeNode(common.IfSuccess(), {call});
schedule->AddNode(success_block, if_success);
// Pass `call` as both the effect and control input of `IfException`.
schedule->AddNode(exception_block,
MakeNode(common.IfException(), {call, call}));
// This operation terminates the current block; a successor block is entered
// before the next operation is processed.
current_block = nullptr;
return if_success;
}
Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
size_t succ_count = op.cases.size() + 1;
Node* switch_node =
@ -899,8 +1148,10 @@ Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
RecreateScheduleResult RecreateSchedule(const Graph& graph,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone) {
ScheduleBuilder builder{graph, call_descriptor, graph_zone, phase_zone};
Zone* graph_zone, Zone* phase_zone,
SourcePositionTable* source_positions) {
ScheduleBuilder builder{graph, call_descriptor, graph_zone, phase_zone,
source_positions};
return builder.Run();
}

View File

@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
#define V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
#include "src/compiler/compiler-source-position-table.h"
namespace v8::internal {
class Zone;
}
@ -23,7 +24,8 @@ struct RecreateScheduleResult {
RecreateScheduleResult RecreateSchedule(const Graph& graph,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone);
Zone* graph_zone, Zone* phase_zone,
SourcePositionTable* source_positions);
} // namespace v8::internal::compiler::turboshaft

View File

@ -0,0 +1,71 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_SIDETABLE_H_
#define V8_COMPILER_TURBOSHAFT_SIDETABLE_H_
#include <algorithm>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
// This sidetable is a conceptually infinite mapping from Turboshaft operation
// indices to values. It grows automatically and default-initializes the table
// when accessed out-of-bounds.
// This sidetable is a conceptually infinite mapping from Turboshaft operation
// indices to values. It grows automatically and default-initializes the table
// when accessed out-of-bounds.
template <class T>
class GrowingSidetable {
 public:
  explicit GrowingSidetable(Zone* zone) : table_(zone) {}

  // Returns a reference to the value for `op`, growing the table first if
  // necessary so the slot exists (default-initialized).
  T& operator[](OpIndex op) {
    size_t i = op.id();
    EnsureContains(i);
    return table_[i];
  }

  // Const access also grows the (mutable) table, so out-of-bounds reads
  // observe a default-initialized value rather than UB.
  const T& operator[](OpIndex op) const {
    size_t i = op.id();
    EnsureContains(i);
    return table_[i];
  }

  // Reset by filling the table with the default value instead of shrinking to
  // keep the memory for later phases.
  void Reset() { std::fill(table_.begin(), table_.end(), T{}); }

  // Returns `true` if the table never contained any values, even before
  // `Reset()`. Now `const`: it is a pure query on the table.
  bool empty() const { return table_.empty(); }

 private:
  mutable ZoneVector<T> table_;

  size_t NextSize(size_t out_of_bounds_index) const {
    DCHECK_GE(out_of_bounds_index, table_.size());
    return out_of_bounds_index + out_of_bounds_index / 2 + 32;
  }

  // Grows `table_` so that index `i` becomes valid. Shared by both
  // `operator[]` overloads instead of duplicating the growth logic.
  void EnsureContains(size_t i) const {
    if (V8_UNLIKELY(i >= table_.size())) {
      table_.resize(NextSize(i));
      // Make sure we also get access to potential over-allocation by
      // `resize()`.
      table_.resize(table_.capacity());
    }
  }
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_SIDETABLE_H_

View File

@ -228,11 +228,6 @@
# Needs deterministic test helpers for concurrent maglev tiering.
# TODO(jgruber,v8:7700): Implement ASAP.
'maglev/18': [SKIP],
# Stress variants cause operators that are currently still unsupported by
# TurboShaft.
# TODO(v8:12783)
'turboshaft/simple': [PASS, NO_VARIANTS],
}], # ALWAYS
##############################################################################

View File

@ -17,6 +17,7 @@ ALL_VARIANT_FLAGS = {
"sparkplug": [["--sparkplug"]],
# TODO(v8:v8:7700): Support concurrent compilation and remove flag.
"maglev": [["--maglev", "--no-concurrent-recompilation"]],
"turboshaft": [["--turboshaft"]],
"concurrent_sparkplug": [["--concurrent-sparkplug", "--sparkplug"]],
"always_sparkplug": [["--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],

View File

@ -36,15 +36,20 @@ MORE_VARIANTS = [
]
VARIANT_ALIASES = {
# The default for developer workstations.
'dev': VARIANTS,
# Additional variants, run on all bots.
'more': MORE_VARIANTS,
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': ['nooptimization', 'future', 'no_wasm_traps',
'instruction_scheduling', 'always_sparkplug'],
# The default for developer workstations.
'dev':
VARIANTS,
# Additional variants, run on all bots.
'more':
MORE_VARIANTS,
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive':
MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': [
'nooptimization', 'future', 'no_wasm_traps', 'instruction_scheduling',
'always_sparkplug', 'turboshaft'
],
}
# Extra flags passed to all tests using the standard test runner.