[turboshaft] initial commit

TurboShaft is a new, CFG-based IR for TurboFan.
This CL adds the basic IR and a bidirectional translation to and from
TurboFan's sea-of-nodes IR for some common operators (operator coverage
is still incomplete, even for JS).

Bug: v8:12783
Change-Id: I162fdf10d583a9275a9f655f5b44b888faf813f6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3563562
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80136}
Tobias Tebbi, 2022-04-25 08:27:09 +00:00 (committed by V8 LUCI CQ)
commit e4cc6ed44b, parent f11e402812
28 changed files with 4488 additions and 13 deletions

BUILD.bazel

@@ -2798,6 +2798,16 @@ filegroup(
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
"src/compiler/store-store-elimination.h",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/graph-builder.cc",
"src/compiler/turboshaft/graph-builder.h",
"src/compiler/turboshaft/graph.cc",
"src/compiler/turboshaft/graph.h",
"src/compiler/turboshaft/operations.cc",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.cc",

BUILD.gn

@@ -2876,6 +2876,12 @@ v8_header_set("v8_internal_headers") {
"src/compiler/simplified-operator.h",
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.h",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/graph-builder.h",
"src/compiler/turboshaft/graph.h",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.h",
"src/compiler/typed-optimization.h",
@@ -4060,6 +4066,34 @@ v8_source_set("v8_compiler") {
configs = [ ":internal_config" ]
}
# The src/compiler files with default optimization behavior.
v8_source_set("v8_turboshaft") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/compiler/turboshaft/graph-builder.cc",
"src/compiler/turboshaft/graph.cc",
"src/compiler/turboshaft/operations.cc",
"src/compiler/turboshaft/recreate-schedule.cc",
]
public_deps = [
":generate_bytecode_builtins_list",
":run_torque",
":v8_internal_headers",
":v8_maybe_icu",
":v8_tracing",
]
deps = [
":v8_base_without_compiler",
":v8_libbase",
":v8_shared_internal_headers",
]
configs = [ ":internal_config" ]
}
group("v8_compiler_for_mksnapshot") {
if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
deps = [ ":v8_compiler_opt" ]
@@ -4971,6 +5005,7 @@ group("v8_base") {
public_deps = [
":v8_base_without_compiler",
":v8_compiler",
":v8_turboshaft",
]
}
@@ -5879,6 +5914,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
":v8_maybe_icu",
":v8_shared_internal_headers",
":v8_tracing",
":v8_turboshaft",
"//build/win:default_exe_manifest",
]
}

src/base/functional.h

@@ -11,6 +11,7 @@
#include <cstddef>
#include <cstring>
#include <functional>
#include <type_traits>
#include <utility>
#include "src/base/base-export.h"
@@ -137,6 +138,22 @@ V8_INLINE size_t hash_value(std::pair<T1, T2> const& v) {
return hash_combine(v.first, v.second);
}
template <typename... T, size_t... I>
V8_INLINE size_t hash_value_impl(std::tuple<T...> const& v,
std::index_sequence<I...>) {
return hash_combine(std::get<I>(v)...);
}
template <typename... T>
V8_INLINE size_t hash_value(std::tuple<T...> const& v) {
return hash_value_impl(v, std::make_index_sequence<sizeof...(T)>());
}
template <typename T, typename = std::enable_if_t<std::is_enum<T>::value>>
V8_INLINE size_t hash_value(T v) {
return hash_value(static_cast<std::underlying_type_t<T>>(v));
}
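// Editor's sketch (not part of this CL): with the overloads above, tuples
// and enums now hash through the same free function, e.g.
//
//   enum class Opcode : uint8_t { kAdd, kSub };
//   size_t h1 = hash_value(Opcode::kSub);             // enum overload
//   size_t h2 = hash_value(std::make_tuple(1, 2.5));  // tuple overload
//   size_t h3 = hash_combine(h1, h2);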
template <typename T>
struct hash {
V8_INLINE size_t operator()(T const& v) const { return hash_value(v); }

src/base/iterator.h

@@ -39,12 +39,12 @@ class iterator_range {
iterator_range(ForwardIterator begin, ForwardIterator end)
: begin_(begin), end_(end) {}
-iterator begin() { return begin_; }
-iterator end() { return end_; }
-const_iterator begin() const { return begin_; }
-const_iterator end() const { return end_; }
+iterator begin() const { return begin_; }
+iterator end() const { return end_; }
+const_iterator cbegin() const { return begin_; }
+const_iterator cend() const { return end_; }
auto rbegin() const { return std::make_reverse_iterator(end_); }
auto rend() const { return std::make_reverse_iterator(begin_); }
bool empty() const { return cbegin() == cend(); }
@@ -62,6 +62,24 @@ auto make_iterator_range(ForwardIterator begin, ForwardIterator end) {
return iterator_range<ForwardIterator>{begin, end};
}
template <class T>
struct DerefPtrIterator : base::iterator<std::bidirectional_iterator_tag, T> {
T* const* ptr;
explicit DerefPtrIterator(T* const* ptr) : ptr(ptr) {}
T& operator*() { return **ptr; }
DerefPtrIterator& operator++() {
++ptr;
return *this;
}
DerefPtrIterator& operator--() {
--ptr;
return *this;
}
bool operator!=(DerefPtrIterator other) { return ptr != other.ptr; }
};
// {Reversed} returns a container adapter usable in a range-based "for"
// statement for iterating a reversible container in reverse order.
//
@@ -71,11 +89,22 @@ auto make_iterator_range(ForwardIterator begin, ForwardIterator end) {
// for (int i : base::Reversed(v)) {
// // iterates through v from back to front
// }
//
// The signature avoids binding to temporaries (T&& / const T&) on purpose. The
// lifetime of a temporary would not extend to a range-based for loop using it.
template <typename T>
auto Reversed(T& t) { // NOLINT(runtime/references): match {rbegin} and {rend}
return make_iterator_range(std::rbegin(t), std::rend(t));
}
// This overload of `Reversed` is safe even when the argument is a temporary,
// because we rely on the wrapped iterators instead of the `iterator_range`
// object itself.
template <typename T>
auto Reversed(const iterator_range<T>& t) {
return make_iterator_range(std::rbegin(t), std::rend(t));
}
} // namespace base
} // namespace v8
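// Editor's sketch (not part of this CL): `DerefPtrIterator` lets a range of
// `T*` be iterated as `T&`, and the `iterator_range` overload of `Reversed`
// is safe on temporaries because it copies the wrapped iterators:
//
//   std::vector<Block*> blocks = ...;
//   auto refs = v8::base::make_iterator_range(
//       v8::base::DerefPtrIterator<Block>(blocks.data()),
//       v8::base::DerefPtrIterator<Block>(blocks.data() + blocks.size()));
//   for (Block& b : v8::base::Reversed(refs)) { /* back to front, as T& */ }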

src/base/small-vector.h

@@ -11,6 +11,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/vector.h"
namespace v8 {
namespace base {
@@ -29,7 +30,8 @@ class SmallVector {
explicit SmallVector(const Allocator& allocator = Allocator())
: allocator_(allocator) {}
-explicit SmallVector(size_t size, const Allocator& allocator = Allocator())
+explicit V8_INLINE SmallVector(size_t size,
+                               const Allocator& allocator = Allocator())
: allocator_(allocator) {
resize_no_init(size);
}
@@ -43,10 +45,14 @@ class SmallVector {
: allocator_(allocator) {
*this = std::move(other);
}
-SmallVector(std::initializer_list<T> init,
-            const Allocator& allocator = Allocator())
-    : allocator_(allocator) {
-  resize_no_init(init.size());
+V8_INLINE SmallVector(std::initializer_list<T> init,
+                      const Allocator& allocator = Allocator())
+    : SmallVector(init.size(), allocator) {
memcpy(begin_, init.begin(), sizeof(T) * init.size());
}
explicit V8_INLINE SmallVector(base::Vector<const T> init,
const Allocator& allocator = Allocator())
: SmallVector(init.size(), allocator) {
memcpy(begin_, init.begin(), sizeof(T) * init.size());
}
@@ -127,6 +133,8 @@ class SmallVector {
end_ = end + 1;
}
void push_back(T x) { emplace_back(std::move(x)); }
void pop_back(size_t count = 1) {
DCHECK_GE(size(), count);
end_ -= count;

src/base/vector.h

@@ -12,6 +12,7 @@
#include <memory>
#include <type_traits>
#include "src/base/functional.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -42,6 +43,21 @@ class Vector {
DCHECK_LE(to, length_);
return Vector<T>(begin() + from, to - from);
}
Vector<T> SubVectorFrom(size_t from) const {
return SubVector(from, length_);
}
template <class U>
void OverwriteWith(Vector<U> other) {
DCHECK_EQ(size(), other.size());
std::copy(other.begin(), other.end(), begin());
}
template <class U, size_t n>
void OverwriteWith(const std::array<U, n>& other) {
DCHECK_EQ(size(), other.size());
std::copy(other.begin(), other.end(), begin());
}
// Returns the length of the vector. Only use this if you really need an
// integer return value. Use {size()} otherwise.
@@ -80,6 +96,13 @@ class Vector {
// Returns a pointer past the end of the data in the vector.
constexpr T* end() const { return start_ + length_; }
constexpr std::reverse_iterator<T*> rbegin() const {
return std::make_reverse_iterator(end());
}
constexpr std::reverse_iterator<T*> rend() const {
return std::make_reverse_iterator(begin());
}
// Returns a clone of this vector with a new backing store.
Vector<T> Clone() const {
T* result = new T[length_];
@@ -140,6 +163,11 @@ class Vector {
size_t length_;
};
template <typename T>
V8_INLINE size_t hash_value(base::Vector<T> v) {
return hash_range(v.begin(), v.end());
}
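// Editor's sketch (not part of this CL): the helpers added above in action.
//
//   int backing[4] = {1, 2, 3, 4};
//   base::Vector<int> v = base::ArrayVector(backing);
//   v.SubVectorFrom(2).OverwriteWith(      // tail [2, 4) becomes {7, 8}
//       std::array<int, 2>{7, 8});
//   size_t h = hash_value(v);              // hashes the element range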
template <typename T>
class V8_NODISCARD ScopedVector : public Vector<T> {
public:

src/codegen/machine-type.h

@@ -6,6 +6,7 @@
#define V8_CODEGEN_MACHINE_TYPE_H_
#include <iosfwd>
#include <limits>
#include "include/v8-fast-api-calls.h"
#include "src/base/bits.h"
@@ -417,6 +418,25 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeInBytes(
return 1 << ElementSizeLog2Of(rep);
}
inline constexpr int ElementSizeInBits(MachineRepresentation rep) {
return 8 * ElementSizeInBytes(rep);
}
inline constexpr uint64_t MaxUnsignedValue(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord8:
return std::numeric_limits<uint8_t>::max();
case MachineRepresentation::kWord16:
return std::numeric_limits<uint16_t>::max();
case MachineRepresentation::kWord32:
return std::numeric_limits<uint32_t>::max();
case MachineRepresentation::kWord64:
return std::numeric_limits<uint64_t>::max();
default:
UNREACHABLE();
}
}
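// Editor's note (not part of this CL): both new helpers are constexpr, e.g.
//
//   static_assert(ElementSizeInBits(MachineRepresentation::kWord32) == 32);
//   static_assert(
//       MaxUnsignedValue(MachineRepresentation::kWord16) == 0xFFFF);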
V8_EXPORT_PRIVATE inline constexpr int ElementSizeInPointers(
MachineRepresentation rep) {
return (ElementSizeInBytes(rep) + kSystemPointerSize - 1) /

src/compiler/backend/instruction-selector.cc

@@ -3295,7 +3295,7 @@ FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone,
const FrameStateInfo& state_info = FrameStateInfoOf(state->op());
int parameters = state_info.parameter_count();
int locals = state_info.local_count();
-int stack = state_info.type() == FrameStateType::kUnoptimizedFunction ? 1 : 0;
+int stack = state_info.stack_count();
FrameStateDescriptor* outer_state = nullptr;
if (state.outer_frame_state()->opcode() == IrOpcode::kFrameState) {

src/compiler/common-operator.h

@@ -610,7 +610,7 @@ class FrameState : public CommonNodeWrapperBase {
DCHECK_EQ(node->opcode(), IrOpcode::kFrameState);
}
-FrameStateInfo frame_state_info() const {
+const FrameStateInfo& frame_state_info() const {
return FrameStateInfoOf(node()->op());
}

src/compiler/frame-states.h

@@ -154,6 +154,9 @@ class FrameStateInfo final {
int local_count() const {
return info_ == nullptr ? 0 : info_->local_count();
}
int stack_count() const {
return type() == FrameStateType::kUnoptimizedFunction ? 1 : 0;
}
const FrameStateFunctionInfo* function_info() const { return info_; }
private:

src/compiler/globals.h

@@ -25,7 +25,7 @@ inline bool CollectFeedbackInGenericLowering() {
return FLAG_turbo_collect_feedback_in_generic_lowering;
}
-enum class StackCheckKind {
+enum class StackCheckKind : uint8_t {
kJSFunctionEntry = 0,
kJSIterationBody,
kCodeStubAssembler,

src/compiler/pipeline.cc

@@ -75,6 +75,10 @@
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/graph-builder.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/recreate-schedule.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
@@ -150,7 +154,7 @@ class PipelineData {
allocator_(isolate->allocator()),
info_(info),
debug_name_(info_->GetDebugName()),
-may_have_unverifiable_graph_(false),
+may_have_unverifiable_graph_(FLAG_turboshaft),
zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
graph_zone_scope_(zone_stats_, kGraphZoneName, kCompressGraphZone),
@@ -168,6 +172,7 @@ class PipelineData {
assembler_options_(AssemblerOptions::Default(isolate)) {
PhaseScope scope(pipeline_statistics, "V8.TFInitPipelineData");
graph_ = graph_zone_->New<Graph>(graph_zone_);
turboshaft_graph_ = std::make_unique<turboshaft::Graph>(graph_zone_);
source_positions_ = graph_zone_->New<SourcePositionTable>(graph_);
node_origins_ = info->trace_turbo_json()
? graph_zone_->New<NodeOriginTable>(graph_)
@@ -340,6 +345,8 @@ class PipelineData {
Zone* graph_zone() const { return graph_zone_; }
Graph* graph() const { return graph_; }
void set_graph(Graph* graph) { graph_ = graph; }
turboshaft::Graph& turboshaft_graph() const { return *turboshaft_graph_; }
SourcePositionTable* source_positions() const { return source_positions_; }
NodeOriginTable* node_origins() const { return node_origins_; }
MachineOperatorBuilder* machine() const { return machine_; }
@@ -460,6 +467,7 @@ class PipelineData {
graph_zone_scope_.Destroy();
graph_zone_ = nullptr;
graph_ = nullptr;
turboshaft_graph_ = nullptr;
source_positions_ = nullptr;
node_origins_ = nullptr;
simplified_ = nullptr;
@@ -619,6 +627,7 @@ class PipelineData {
ZoneStats::Scope graph_zone_scope_;
Zone* graph_zone_ = nullptr;
Graph* graph_ = nullptr;
std::unique_ptr<turboshaft::Graph> turboshaft_graph_ = nullptr;
SourcePositionTable* source_positions_ = nullptr;
NodeOriginTable* node_origins_ = nullptr;
SimplifiedOperatorBuilder* simplified_ = nullptr;
@@ -1991,6 +2000,28 @@ struct BranchConditionDuplicationPhase {
}
};
struct BuildTurboshaftPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BuildTurboShaft)
void Run(PipelineData* data, Zone* temp_zone) {
turboshaft::BuildGraph(data->schedule(), data->graph_zone(), temp_zone,
&data->turboshaft_graph());
data->reset_schedule();
}
};
struct TurboshaftRecreateSchedulePhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftRecreateSchedule)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
auto result = turboshaft::RecreateSchedule(data->turboshaft_graph(),
linkage->GetIncomingDescriptor(),
data->graph_zone(), temp_zone);
data->set_graph(result.graph);
data->set_schedule(result.schedule);
}
};
#if V8_ENABLE_WEBASSEMBLY
struct WasmOptimizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmOptimization)
@@ -2812,6 +2843,28 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
ComputeScheduledGraph();
if (FLAG_turboshaft) {
Run<BuildTurboshaftPhase>();
if (data->info()->trace_turbo_graph()) {
UnparkedScopeIfNeeded scope(data->broker());
AllowHandleDereference allow_deref;
CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
tracing_scope.stream()
<< "\n-- TurboShaft Graph ----------------------------\n"
<< data->turboshaft_graph();
}
Run<TurboshaftRecreateSchedulePhase>(linkage);
if (data->info()->trace_turbo_graph() || FLAG_trace_turbo_scheduler) {
UnparkedScopeIfNeeded scope(data->broker());
AllowHandleDereference allow_deref;
CodeTracer::StreamScope tracing_scope(data->GetCodeTracer());
tracing_scope.stream()
<< "\n-- Recreated Schedule ----------------------------\n"
<< *data->schedule();
}
}
return SelectInstructions(linkage);
}
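// Editor's note (not part of this CL): under --turboshaft the code above is
// effectively a round trip through the new IR:
//
//   ComputeScheduledGraph();              // sea of nodes -> Schedule
//   Run<BuildTurboshaftPhase>();          // Schedule -> turboshaft::Graph
//   Run<TurboshaftRecreateSchedulePhase>(linkage);
//                                         // turboshaft::Graph -> Schedule
//   SelectInstructions(linkage);          // backend continues as before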

src/compiler/turboshaft/assembler.h (new file)

@@ -0,0 +1,209 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_ASSEMBLER_H_
#define V8_COMPILER_TURBOSHAFT_ASSEMBLER_H_
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
#include "src/base/iterator.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
// This class is used to extend an assembler with useful shorthands that
// still forward to the regular operations of the deriving assembler.
template <class Subclass, class Superclass>
class AssemblerInterface : public Superclass {
public:
using Superclass::Superclass;
using Base = Superclass;
OpIndex Add(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kAdd, rep);
}
OpIndex AddWithOverflow(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().OverflowCheckedBinop(
left, right, OverflowCheckedBinopOp::Kind::kSignedAdd, rep);
}
OpIndex Sub(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kSub, rep);
}
OpIndex SubWithOverflow(OpIndex left, OpIndex right,
MachineRepresentation rep) {
return subclass().OverflowCheckedBinop(
left, right, OverflowCheckedBinopOp::Kind::kSignedSub, rep);
}
OpIndex Mul(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kMul, rep);
}
OpIndex MulWithOverflow(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().OverflowCheckedBinop(
left, right, OverflowCheckedBinopOp::Kind::kSignedMul, rep);
}
OpIndex BitwiseAnd(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kBitwiseAnd, rep);
}
OpIndex BitwiseOr(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kBitwiseOr, rep);
}
OpIndex BitwiseXor(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kBitwiseXor, rep);
}
OpIndex ShiftLeft(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftLeft, rep);
}
OpIndex Word32Constant(uint32_t value) {
return subclass().Constant(ConstantOp::Kind::kWord32, uint64_t{value});
}
OpIndex Word64Constant(uint64_t value) {
return subclass().Constant(ConstantOp::Kind::kWord64, value);
}
OpIndex IntegralConstant(uint64_t value, MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kWord32:
return Word32Constant(static_cast<uint32_t>(value));
case MachineRepresentation::kWord64:
return Word64Constant(value);
default:
UNREACHABLE();
}
}
OpIndex Float32Constant(float value) {
return subclass().Constant(ConstantOp::Kind::kFloat32, value);
}
OpIndex Float64Constant(double value) {
return subclass().Constant(ConstantOp::Kind::kFloat64, value);
}
OpIndex TruncateWord64ToWord32(OpIndex value) {
return subclass().Change(value, ChangeOp::Kind::kIntegerTruncate,
MachineRepresentation::kWord64,
MachineRepresentation::kWord32);
}
private:
Subclass& subclass() { return *static_cast<Subclass*>(this); }
};
// This empty base-class is used to provide default-implementations of plain
// methods emitting operations.
template <class Assembler>
class AssemblerBase {
public:
#define EMIT_OP(Name) \
template <class... Args> \
OpIndex Name(Args... args) { \
return static_cast<Assembler*>(this)->template Emit<Name##Op>(args...); \
}
TURBOSHAFT_OPERATION_LIST(EMIT_OP)
#undef EMIT_OP
};
class Assembler
: public AssemblerInterface<Assembler, AssemblerBase<Assembler>> {
public:
Block* NewBlock(Block::Kind kind) { return graph_.NewBlock(kind); }
V8_INLINE bool Bind(Block* block) {
if (!graph().Add(block)) return false;
DCHECK_NULL(current_block_);
current_block_ = block;
return true;
}
OpIndex Phi(base::Vector<const OpIndex> inputs, MachineRepresentation rep) {
DCHECK(current_block()->IsMerge() &&
inputs.size() == current_block()->Predecessors().size());
return Base::Phi(inputs, rep);
}
template <class... Args>
OpIndex PendingLoopPhi(Args... args) {
DCHECK(current_block()->IsLoop());
return Base::PendingLoopPhi(args...);
}
OpIndex Goto(Block* destination) {
destination->AddPredecessor(current_block());
return Base::Goto(destination);
}
OpIndex Branch(OpIndex condition, Block* if_true, Block* if_false) {
if_true->AddPredecessor(current_block());
if_false->AddPredecessor(current_block());
return Base::Branch(condition, if_true, if_false);
}
OpIndex Switch(OpIndex input, base::Vector<const SwitchOp::Case> cases,
Block* default_case) {
for (SwitchOp::Case c : cases) {
c.destination->AddPredecessor(current_block());
}
default_case->AddPredecessor(current_block());
return Base::Switch(input, cases, default_case);
}
explicit Assembler(Graph* graph, Zone* phase_zone)
: graph_(*graph), phase_zone_(phase_zone) {
graph_.Reset();
}
Block* current_block() { return current_block_; }
Zone* graph_zone() { return graph().graph_zone(); }
Graph& graph() { return graph_; }
Zone* phase_zone() { return phase_zone_; }
private:
friend class AssemblerBase<Assembler>;
void FinalizeBlock() {
graph().Finalize(current_block_);
current_block_ = nullptr;
}
template <class Op, class... Args>
OpIndex Emit(Args... args) {
STATIC_ASSERT((std::is_base_of<Operation, Op>::value));
STATIC_ASSERT(!(std::is_same<Op, Operation>::value));
DCHECK_NOT_NULL(current_block_);
OpIndex result = graph().Add<Op>(args...);
if (Op::properties.is_block_terminator) FinalizeBlock();
return result;
}
Block* current_block_ = nullptr;
Graph& graph_;
Zone* const phase_zone_;
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_ASSEMBLER_H_
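// Editor's sketch (not part of this CL): building a tiny graph by hand with
// the interface above (`graph_zone` and `phase_zone` are assumed Zone*):
//
//   Graph graph(graph_zone);
//   Assembler assembler(&graph, phase_zone);
//   Block* entry = assembler.NewBlock(Block::Kind::kMerge);
//   assembler.Bind(entry);
//   OpIndex five = assembler.Word32Constant(5);
//   OpIndex sum = assembler.Add(five, five, MachineRepresentation::kWord32);
//   assembler.Return(base::VectorOf({sum}), /*pop_count=*/0);  // ends block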

src/compiler/turboshaft/deopt-data.h (new file)

@@ -0,0 +1,117 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
#define V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
struct FrameStateData {
// The data is encoded as a pre-order traversal of a tree.
enum class Instr : uint8_t {
kInput, // 1 Operand: input machine type
kUnusedRegister,
kDematerializedObject, // 2 Operands: id, field_count
kDematerializedObjectReference // 1 Operand: id
};
class Builder {
public:
void AddParentFrameState(OpIndex parent) {
DCHECK(inputs_.empty());
inlined_ = true;
inputs_.push_back(parent);
}
void AddInput(MachineType type, OpIndex input) {
instructions_.push_back(Instr::kInput);
machine_types_.push_back(type);
inputs_.push_back(input);
}
void AddUnusedRegister() {
instructions_.push_back(Instr::kUnusedRegister);
}
void AddDematerializedObjectReference(uint32_t id) {
instructions_.push_back(Instr::kDematerializedObjectReference);
int_operands_.push_back(id);
}
void AddDematerializedObject(uint32_t id, uint32_t field_count) {
instructions_.push_back(Instr::kDematerializedObject);
int_operands_.push_back(id);
int_operands_.push_back(field_count);
}
const FrameStateData* AllocateFrameStateData(
const FrameStateInfo& frame_state_info, Zone* zone) {
return zone->New<FrameStateData>(FrameStateData{
frame_state_info, zone->CloneVector(base::VectorOf(instructions_)),
zone->CloneVector(base::VectorOf(machine_types_)),
zone->CloneVector(base::VectorOf(int_operands_))});
}
base::Vector<const OpIndex> Inputs() { return base::VectorOf(inputs_); }
bool inlined() const { return inlined_; }
private:
base::SmallVector<Instr, 32> instructions_;
base::SmallVector<MachineType, 32> machine_types_;
base::SmallVector<uint32_t, 16> int_operands_;
base::SmallVector<OpIndex, 32> inputs_;
bool inlined_ = false;
};
struct Iterator {
base::Vector<const Instr> instructions;
base::Vector<const MachineType> machine_types;
base::Vector<const uint32_t> int_operands;
base::Vector<const OpIndex> inputs;
bool has_more() const { return !instructions.empty(); }
Instr current_instr() { return instructions[0]; }
void ConsumeInput(MachineType* machine_type, OpIndex* input) {
DCHECK_EQ(instructions[0], Instr::kInput);
instructions += 1;
*machine_type = machine_types[0];
machine_types += 1;
*input = inputs[0];
inputs += 1;
}
void ConsumeUnusedRegister() {
DCHECK_EQ(instructions[0], Instr::kUnusedRegister);
instructions += 1;
}
void ConsumeDematerializedObject(uint32_t* id, uint32_t* field_count) {
DCHECK_EQ(instructions[0], Instr::kDematerializedObject);
instructions += 1;
*id = int_operands[0];
*field_count = int_operands[1];
int_operands += 2;
}
void ConsumeDematerializedObjectReference(uint32_t* id) {
DCHECK_EQ(instructions[0], Instr::kDematerializedObjectReference);
instructions += 1;
*id = int_operands[0];
int_operands += 1;
}
};
Iterator iterator(base::Vector<const OpIndex> state_values) const {
return Iterator{instructions, machine_types, int_operands, state_values};
}
const FrameStateInfo& frame_state_info;
base::Vector<Instr> instructions;
base::Vector<MachineType> machine_types;
base::Vector<uint32_t> int_operands;
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
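// Editor's sketch (not part of this CL): encoding locals {x, <unused>,
// object with fields {a, b}} as such a pre-order traversal:
//
//   FrameStateData::Builder builder;
//   builder.AddInput(MachineType::AnyTagged(), x);    // kInput
//   builder.AddUnusedRegister();                      // kUnusedRegister
//   builder.AddDematerializedObject(/*id=*/0, /*field_count=*/2);
//   builder.AddInput(MachineType::AnyTagged(), a);    //   field 0
//   builder.AddInput(MachineType::AnyTagged(), b);    //   field 1
//   const FrameStateData* data =
//       builder.AllocateFrameStateData(frame_state_info, zone);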

src/compiler/turboshaft/graph-builder.cc (new file)

@@ -0,0 +1,722 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/turboshaft/graph-builder.h"
#include <limits>
#include <numeric>
#include "src/base/logging.h"
#include "src/base/safe_conversions.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-aux-data.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
#include "src/compiler/turboshaft/assembler.h"
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
namespace {
struct GraphBuilder {
Zone* graph_zone;
Zone* phase_zone;
Schedule& schedule;
Assembler assembler;
NodeAuxData<OpIndex> op_mapping{phase_zone};
ZoneVector<Block*> block_mapping{schedule.RpoBlockCount(), phase_zone};
void Run();
private:
OpIndex Map(Node* old_node) {
OpIndex result = op_mapping.Get(old_node);
DCHECK(assembler.graph().IsValid(result));
return result;
}
Block* Map(BasicBlock* block) {
Block* result = block_mapping[block->rpo_number()];
DCHECK_NOT_NULL(result);
return result;
}
void FixLoopPhis(Block* loop, Block* backedge) {
DCHECK(loop->IsLoop());
for (Operation& op : assembler.graph().operations(*loop)) {
if (!op.Is<PendingLoopPhiOp>()) continue;
auto& pending_phi = op.Cast<PendingLoopPhiOp>();
assembler.graph().Replace<PhiOp>(
assembler.graph().Index(pending_phi),
base::VectorOf(
{pending_phi.first(), Map(pending_phi.old_backedge_node)}),
pending_phi.rep);
}
}
void ProcessDeoptInput(FrameStateData::Builder* builder, Node* input,
MachineType type) {
DCHECK_NE(input->opcode(), IrOpcode::kObjectState);
DCHECK_NE(input->opcode(), IrOpcode::kStateValues);
DCHECK_NE(input->opcode(), IrOpcode::kTypedStateValues);
if (input->opcode() == IrOpcode::kObjectId) {
builder->AddDematerializedObjectReference(ObjectIdOf(input->op()));
} else if (input->opcode() == IrOpcode::kTypedObjectState) {
const TypedObjectStateInfo& info =
OpParameter<TypedObjectStateInfo>(input->op());
int field_count = input->op()->ValueInputCount();
builder->AddDematerializedObject(info.object_id(),
static_cast<uint32_t>(field_count));
for (int i = 0; i < field_count; ++i) {
ProcessDeoptInput(builder, input->InputAt(i),
(*info.machine_types())[i]);
}
} else {
builder->AddInput(type, Map(input));
}
}
void ProcessStateValues(FrameStateData::Builder* builder,
Node* state_values) {
for (auto it = StateValuesAccess(state_values).begin(); !it.done(); ++it) {
if (Node* node = it.node()) {
ProcessDeoptInput(builder, node, (*it).type);
} else {
builder->AddUnusedRegister();
}
}
}
void BuildFrameStateData(FrameStateData::Builder* builder,
FrameState frame_state) {
if (frame_state.outer_frame_state()->opcode() != IrOpcode::kStart) {
builder->AddParentFrameState(Map(frame_state.outer_frame_state()));
}
ProcessStateValues(builder, frame_state.parameters());
ProcessStateValues(builder, frame_state.locals());
ProcessStateValues(builder, frame_state.stack());
ProcessDeoptInput(builder, frame_state.context(), MachineType::AnyTagged());
ProcessDeoptInput(builder, frame_state.function(),
MachineType::AnyTagged());
}
Block::Kind BlockKind(BasicBlock* block) {
switch (block->front()->opcode()) {
case IrOpcode::kStart:
case IrOpcode::kEnd:
case IrOpcode::kMerge:
return Block::Kind::kMerge;
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kIfValue:
case IrOpcode::kIfDefault:
case IrOpcode::kIfSuccess:
case IrOpcode::kIfException:
return Block::Kind::kBranchTarget;
case IrOpcode::kLoop:
return Block::Kind::kLoopHeader;
default:
block->front()->Print();
UNIMPLEMENTED();
}
}
OpIndex Process(Node* node, BasicBlock* block,
const base::SmallVector<int, 16>& predecessor_permutation);
};
void GraphBuilder::Run() {
for (BasicBlock* block : *schedule.rpo_order()) {
block_mapping[block->rpo_number()] = assembler.NewBlock(BlockKind(block));
}
for (BasicBlock* block : *schedule.rpo_order()) {
Block* target_block = Map(block);
if (!assembler.Bind(target_block)) continue;
target_block->SetDeferred(block->deferred());
// Since we visit blocks in rpo-order, the new block predecessors are sorted
// in rpo order too. However, the input schedule does not order
// predecessors, so we have to apply a corresponding permutation to phi
// inputs.
const BasicBlockVector& predecessors = block->predecessors();
base::SmallVector<int, 16> predecessor_permutation(predecessors.size());
std::iota(predecessor_permutation.begin(), predecessor_permutation.end(),
0);
std::sort(predecessor_permutation.begin(), predecessor_permutation.end(),
[&](size_t i, size_t j) {
return predecessors[i]->rpo_number() <
predecessors[j]->rpo_number();
});
for (Node* node : *block->nodes()) {
OpIndex i = Process(node, block, predecessor_permutation);
op_mapping.Set(node, i);
}
if (Node* node = block->control_input()) {
OpIndex i = Process(node, block, predecessor_permutation);
op_mapping.Set(node, i);
}
switch (block->control()) {
case BasicBlock::kGoto: {
DCHECK_EQ(block->SuccessorCount(), 1);
Block* destination = Map(block->SuccessorAt(0));
assembler.Goto(destination);
if (destination->IsLoop()) {
FixLoopPhis(destination, target_block);
}
break;
}
case BasicBlock::kBranch:
case BasicBlock::kSwitch:
case BasicBlock::kReturn:
case BasicBlock::kDeoptimize:
case BasicBlock::kThrow:
break;
case BasicBlock::kCall:
case BasicBlock::kTailCall:
UNIMPLEMENTED();
case BasicBlock::kNone:
UNREACHABLE();
}
DCHECK_NULL(assembler.current_block());
}
}
OpIndex GraphBuilder::Process(
Node* node, BasicBlock* block,
const base::SmallVector<int, 16>& predecessor_permutation) {
const Operator* op = node->op();
Operator::Opcode opcode = op->opcode();
switch (opcode) {
case IrOpcode::kStart:
case IrOpcode::kMerge:
case IrOpcode::kLoop:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
case IrOpcode::kIfDefault:
case IrOpcode::kIfValue:
case IrOpcode::kTypedStateValues:
case IrOpcode::kObjectId:
case IrOpcode::kTypedObjectState:
case IrOpcode::kEffectPhi:
case IrOpcode::kTerminate:
return OpIndex::Invalid();
case IrOpcode::kParameter: {
const ParameterInfo& info = ParameterInfoOf(op);
return assembler.Parameter(info.index(), info.debug_name());
}
case IrOpcode::kPhi: {
int input_count = op->ValueInputCount();
MachineRepresentation rep = PhiRepresentationOf(op);
if (assembler.current_block()->IsLoop()) {
DCHECK_EQ(input_count, 2);
return assembler.PendingLoopPhi(Map(node->InputAt(0)), rep,
node->InputAt(1));
} else {
base::SmallVector<OpIndex, 16> inputs;
for (int i = 0; i < input_count; ++i) {
inputs.push_back(Map(node->InputAt(predecessor_permutation[i])));
}
return assembler.Phi(base::VectorOf(inputs), rep);
}
}
case IrOpcode::kInt64Constant:
return assembler.Constant(
ConstantOp::Kind::kWord64,
static_cast<uint64_t>(OpParameter<int64_t>(op)));
case IrOpcode::kInt32Constant:
return assembler.Constant(
ConstantOp::Kind::kWord32,
uint64_t{static_cast<uint32_t>(OpParameter<int32_t>(op))});
case IrOpcode::kFloat64Constant:
return assembler.Constant(ConstantOp::Kind::kFloat64,
OpParameter<double>(op));
case IrOpcode::kFloat32Constant:
return assembler.Constant(ConstantOp::Kind::kFloat32,
OpParameter<float>(op));
case IrOpcode::kNumberConstant:
return assembler.Constant(ConstantOp::Kind::kNumber,
OpParameter<double>(op));
case IrOpcode::kTaggedIndexConstant:
return assembler.Constant(
ConstantOp::Kind::kTaggedIndex,
uint64_t{static_cast<uint32_t>(OpParameter<int32_t>(op))});
case IrOpcode::kHeapConstant:
return assembler.Constant(ConstantOp::Kind::kHeapObject,
HeapConstantOf(op));
case IrOpcode::kCompressedHeapConstant:
return assembler.Constant(ConstantOp::Kind::kCompressedHeapObject,
HeapConstantOf(op));
case IrOpcode::kExternalConstant:
return assembler.Constant(ConstantOp::Kind::kExternal,
OpParameter<ExternalReference>(op));
case IrOpcode::kDelayedStringConstant:
return assembler.Constant(ConstantOp::Kind::kDelayedString,
StringConstantBaseOf(op));
case IrOpcode::kWord32And:
return assembler.BitwiseAnd(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64And:
return assembler.BitwiseAnd(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord32Or:
return assembler.BitwiseOr(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Or:
return assembler.BitwiseOr(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord32Xor:
return assembler.BitwiseXor(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Xor:
return assembler.BitwiseXor(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kWord64Sar:
case IrOpcode::kWord32Sar: {
MachineRepresentation rep = opcode == IrOpcode::kWord64Sar
? MachineRepresentation::kWord64
: MachineRepresentation::kWord32;
ShiftOp::Kind kind;
switch (ShiftKindOf(op)) {
case ShiftKind::kShiftOutZeros:
kind = ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros;
break;
case ShiftKind::kNormal:
kind = ShiftOp::Kind::kShiftRightArithmetic;
break;
}
return assembler.Shift(Map(node->InputAt(0)), Map(node->InputAt(1)), kind,
rep);
}
case IrOpcode::kWord64Shr:
case IrOpcode::kWord32Shr: {
MachineRepresentation rep = opcode == IrOpcode::kWord64Shr
? MachineRepresentation::kWord64
: MachineRepresentation::kWord32;
return assembler.Shift(Map(node->InputAt(0)), Map(node->InputAt(1)),
ShiftOp::Kind::kShiftRightLogical, rep);
}
case IrOpcode::kWord64Shl:
case IrOpcode::kWord32Shl: {
MachineRepresentation rep = opcode == IrOpcode::kWord64Shl
? MachineRepresentation::kWord64
: MachineRepresentation::kWord32;
return assembler.Shift(Map(node->InputAt(0)), Map(node->InputAt(1)),
ShiftOp::Kind::kShiftLeft, rep);
}
case IrOpcode::kWord32Equal:
return assembler.Equal(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kWord64Equal:
return assembler.Equal(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat32Equal:
return assembler.Equal(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Equal:
return assembler.Equal(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kInt32LessThan:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThan,
MachineRepresentation::kWord32);
case IrOpcode::kInt64LessThan:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThan,
MachineRepresentation::kWord64);
case IrOpcode::kUint32LessThan:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kUnsignedLessThan,
MachineRepresentation::kWord32);
case IrOpcode::kUint64LessThan:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kUnsignedLessThan,
MachineRepresentation::kWord64);
case IrOpcode::kFloat32LessThan:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThan,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64LessThan:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThan,
MachineRepresentation::kFloat64);
case IrOpcode::kInt32LessThanOrEqual:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThanOrEqual,
MachineRepresentation::kWord32);
case IrOpcode::kInt64LessThanOrEqual:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThanOrEqual,
MachineRepresentation::kWord64);
case IrOpcode::kUint32LessThanOrEqual:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kUnsignedLessThanOrEqual,
MachineRepresentation::kWord32);
case IrOpcode::kUint64LessThanOrEqual:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kUnsignedLessThanOrEqual,
MachineRepresentation::kWord64);
case IrOpcode::kFloat32LessThanOrEqual:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThanOrEqual,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64LessThanOrEqual:
return assembler.Comparison(Map(node->InputAt(0)), Map(node->InputAt(1)),
ComparisonOp::Kind::kSignedLessThanOrEqual,
MachineRepresentation::kFloat64);
case IrOpcode::kInt32Add:
return assembler.Add(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt32AddWithOverflow:
return assembler.AddWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt64Add:
return assembler.Add(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kInt64AddWithOverflow:
return assembler.AddWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat64Add:
return assembler.Add(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Add:
return assembler.Add(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kFloat32);
case IrOpcode::kInt32Mul:
return assembler.Mul(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt32MulWithOverflow:
return assembler.MulWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt64Mul:
return assembler.Mul(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kInt32Sub:
return assembler.Sub(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt32SubWithOverflow:
return assembler.SubWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord32);
case IrOpcode::kInt64Sub:
return assembler.Sub(Map(node->InputAt(0)), Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kInt64SubWithOverflow:
return assembler.SubWithOverflow(Map(node->InputAt(0)),
Map(node->InputAt(1)),
MachineRepresentation::kWord64);
case IrOpcode::kFloat32Abs:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kAbs,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Abs:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kAbs,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat32Neg:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kNegate,
MachineRepresentation::kFloat32);
case IrOpcode::kFloat64Neg:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kNegate,
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64SilenceNaN:
return assembler.FloatUnary(Map(node->InputAt(0)),
FloatUnaryOp::Kind::kSilenceNaN,
MachineRepresentation::kFloat64);
case IrOpcode::kTruncateInt64ToInt32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kIntegerTruncate,
MachineRepresentation::kWord64, MachineRepresentation::kWord32);
case IrOpcode::kBitcastWord32ToWord64:
return assembler.Change(Map(node->InputAt(0)), ChangeOp::Kind::kBitcast,
MachineRepresentation::kWord32,
MachineRepresentation::kWord64);
case IrOpcode::kChangeUint32ToUint64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kZeroExtend,
MachineRepresentation::kWord32, MachineRepresentation::kWord64);
case IrOpcode::kChangeInt32ToInt64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignExtend,
MachineRepresentation::kWord32, MachineRepresentation::kWord64);
case IrOpcode::kChangeInt32ToFloat64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignedToFloat,
MachineRepresentation::kWord32, MachineRepresentation::kFloat64);
case IrOpcode::kChangeInt64ToFloat64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignedToFloat,
MachineRepresentation::kWord64, MachineRepresentation::kFloat64);
case IrOpcode::kChangeUint32ToFloat64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kUnsignedToFloat,
MachineRepresentation::kWord32, MachineRepresentation::kFloat64);
case IrOpcode::kTruncateFloat64ToInt64: {
ChangeOp::Kind kind;
switch (OpParameter<TruncateKind>(op)) {
case TruncateKind::kArchitectureDefault:
kind = ChangeOp::Kind::kSignedFloatTruncate;
break;
case TruncateKind::kSetOverflowToMin:
kind = ChangeOp::Kind::kSignedFloatTruncateOverflowToMin;
break;
}
return assembler.Change(Map(node->InputAt(0)), kind,
MachineRepresentation::kFloat64,
MachineRepresentation::kWord64);
}
case IrOpcode::kTruncateFloat64ToWord32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kUnsignedFloatTruncate,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kRoundFloat64ToInt32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignedFloatTruncate,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kChangeFloat64ToInt32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignedNarrowing,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kChangeFloat64ToUint32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kUnsignedNarrowing,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kChangeFloat64ToInt64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kSignedNarrowing,
MachineRepresentation::kFloat64, MachineRepresentation::kWord64);
case IrOpcode::kChangeFloat64ToUint64:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kUnsignedNarrowing,
MachineRepresentation::kFloat64, MachineRepresentation::kWord64);
case IrOpcode::kFloat64ExtractLowWord32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kExtractLowHalf,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kFloat64ExtractHighWord32:
return assembler.Change(
Map(node->InputAt(0)), ChangeOp::Kind::kExtractHighHalf,
MachineRepresentation::kFloat64, MachineRepresentation::kWord32);
case IrOpcode::kBitcastTaggedToWord:
return assembler.TaggedBitcast(Map(node->InputAt(0)),
MachineRepresentation::kTagged,
MachineType::PointerRepresentation());
case IrOpcode::kBitcastWordToTagged:
return assembler.TaggedBitcast(Map(node->InputAt(0)),
MachineType::PointerRepresentation(),
MachineRepresentation::kTagged);
case IrOpcode::kLoad: {
MachineType loaded_rep = LoadRepresentationOf(op);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
return assembler.Load(Map(base), LoadOp::Kind::kRaw, loaded_rep,
offset);
}
if (index->opcode() == IrOpcode::kInt64Constant) {
int64_t offset = OpParameter<int64_t>(index->op());
if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
return assembler.Load(Map(base), LoadOp::Kind::kRaw, loaded_rep,
static_cast<int32_t>(offset));
}
}
int32_t offset = 0;
uint8_t element_size_log2 = 0;
return assembler.IndexedLoad(Map(base), Map(index),
IndexedLoadOp::Kind::kRaw, loaded_rep,
offset, element_size_log2);
}
case IrOpcode::kStore: {
StoreRepresentation store_rep = StoreRepresentationOf(op);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (index->opcode() == IrOpcode::kInt32Constant) {
int32_t offset = OpParameter<int32_t>(index->op());
return assembler.Store(Map(base), Map(value), StoreOp::Kind::kRaw,
store_rep.representation(),
store_rep.write_barrier_kind(), offset);
}
if (index->opcode() == IrOpcode::kInt64Constant) {
int64_t offset = OpParameter<int64_t>(index->op());
if (base::IsValueInRangeForNumericType<int32_t>(offset)) {
return assembler.Store(Map(base), Map(value), StoreOp::Kind::kRaw,
store_rep.representation(),
store_rep.write_barrier_kind(),
static_cast<int32_t>(offset));
}
}
int32_t offset = 0;
uint8_t element_size_log2 = 0;
return assembler.IndexedStore(
Map(base), Map(index), Map(value), IndexedStoreOp::Kind::kRaw,
store_rep.representation(), store_rep.write_barrier_kind(), offset,
element_size_log2);
}
case IrOpcode::kStackPointerGreaterThan:
return assembler.StackPointerGreaterThan(Map(node->InputAt(0)),
StackCheckKindOf(op));
case IrOpcode::kLoadStackCheckOffset:
return assembler.LoadStackCheckOffset();
case IrOpcode::kBranch:
DCHECK_EQ(block->SuccessorCount(), 2);
return assembler.Branch(Map(node->InputAt(0)), Map(block->SuccessorAt(0)),
Map(block->SuccessorAt(1)));
case IrOpcode::kSwitch: {
BasicBlock* default_branch = block->successors().back();
DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
size_t case_count = block->SuccessorCount() - 1;
base::SmallVector<SwitchOp::Case, 16> cases;
for (size_t i = 0; i < case_count; ++i) {
BasicBlock* branch = block->SuccessorAt(i);
const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
cases.emplace_back(p.value(), Map(branch));
}
return assembler.Switch(Map(node->InputAt(0)),
graph_zone->CloneVector(base::VectorOf(cases)),
Map(default_branch));
}
case IrOpcode::kCall: {
auto call_descriptor = CallDescriptorOf(op);
base::SmallVector<OpIndex, 16> arguments;
// The input `0` is the callee, the following value inputs are the
// arguments. `CallDescriptor::InputCount()` counts the callee and
// arguments, but excludes a possible `FrameState` input.
OpIndex callee = Map(node->InputAt(0));
for (int i = 1; i < static_cast<int>(call_descriptor->InputCount());
++i) {
arguments.emplace_back(Map(node->InputAt(i)));
}
OpIndex call =
assembler.Call(callee, base::VectorOf(arguments), call_descriptor);
if (!call_descriptor->NeedsFrameState()) return call;
FrameState frame_state{
node->InputAt(static_cast<int>(call_descriptor->InputCount()))};
assembler.CheckLazyDeopt(call, Map(frame_state));
return call;
}
case IrOpcode::kFrameState: {
FrameState frame_state{node};
FrameStateData::Builder builder;
BuildFrameStateData(&builder, frame_state);
return assembler.FrameState(
builder.Inputs(), builder.inlined(),
builder.AllocateFrameStateData(frame_state.frame_state_info(),
graph_zone));
}
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless: {
OpIndex condition = Map(node->InputAt(0));
OpIndex frame_state = Map(node->InputAt(1));
bool negated = op->opcode() == IrOpcode::kDeoptimizeUnless;
return assembler.DeoptimizeIf(condition, frame_state, negated,
&DeoptimizeParametersOf(op));
}
case IrOpcode::kDeoptimize: {
OpIndex frame_state = Map(node->InputAt(0));
return assembler.Deoptimize(frame_state, &DeoptimizeParametersOf(op));
}
case IrOpcode::kReturn: {
Node* pop_count = node->InputAt(0);
if (pop_count->opcode() != IrOpcode::kInt32Constant) {
UNIMPLEMENTED();
}
base::SmallVector<OpIndex, 4> return_values;
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
return_values.push_back(Map(node->InputAt(i)));
}
return assembler.Return(base::VectorOf(return_values),
OpParameter<int32_t>(pop_count->op()));
}
case IrOpcode::kUnreachable:
for (Node* use : node->uses()) {
CHECK_EQ(use->opcode(), IrOpcode::kThrow);
}
return OpIndex::Invalid();
case IrOpcode::kThrow:
return assembler.Unreachable();
case IrOpcode::kProjection: {
Node* input = node->InputAt(0);
size_t index = ProjectionIndexOf(op);
switch (input->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt32MulWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
case IrOpcode::kInt64SubWithOverflow:
if (index == 0) {
return assembler.Projection(Map(input),
ProjectionOp::Kind::kResult);
} else {
DCHECK_EQ(index, 1);
return assembler.Projection(Map(input),
ProjectionOp::Kind::kOverflowBit);
}
default:
UNIMPLEMENTED();
}
}
default:
std::cout << "unsupported node type: " << *node->op() << "\n";
node->Print();
UNIMPLEMENTED();
}
}
} // namespace
void BuildGraph(Schedule* schedule, Zone* graph_zone, Zone* phase_zone,
Graph* graph) {
GraphBuilder{graph_zone, phase_zone, *schedule, Assembler(graph, phase_zone)}
.Run();
}
} // namespace v8::internal::compiler::turboshaft

src/compiler/turboshaft/graph-builder.h (new file)

@@ -0,0 +1,18 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
#define V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
#include "src/compiler/turboshaft/graph.h"
namespace v8::internal::compiler {
class Schedule;
}
namespace v8::internal::compiler::turboshaft {
void BuildGraph(Schedule* schedule, Zone* graph_zone, Zone* phase_zone,
Graph* graph);
}
#endif // V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_

src/compiler/turboshaft/graph.cc (new file)

@@ -0,0 +1,39 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/turboshaft/graph.h"
#include <iomanip>
namespace v8::internal::compiler::turboshaft {
std::ostream& operator<<(std::ostream& os, PrintAsBlockHeader block_header) {
const Block& block = block_header.block;
const char* block_type =
block.IsLoop() ? "LOOP" : block.IsMerge() ? "MERGE" : "BLOCK";
os << "\n" << block_type << " " << block.index();
if (block.IsDeferred()) os << " (deferred)";
if (!block.Predecessors().empty()) {
os << " <- ";
bool first = true;
for (const Block* pred : block.Predecessors()) {
if (!first) os << ", ";
os << pred->index();
first = false;
}
}
return os;
}
std::ostream& operator<<(std::ostream& os, const Graph& graph) {
for (const Block& block : graph.blocks()) {
os << PrintAsBlockHeader{block} << "\n";
for (const Operation& op : graph.operations(block)) {
os << std::setw(5) << graph.Index(op).id() << ": " << op << "\n";
}
}
return os;
}
} // namespace v8::internal::compiler::turboshaft
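// Editor's note (not part of this CL): together, the two printers above
// produce output of roughly this shape (ids and ops purely illustrative):
//
//   BLOCK B0
//       0: Parameter(...)
//       1: Constant(...)
//   MERGE B2 <- B0, B1
//       5: Phi(...)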

src/compiler/turboshaft/graph.h (new file)

@@ -0,0 +1,508 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_GRAPH_H_
#define V8_COMPILER_TURBOSHAFT_GRAPH_H_
#include <algorithm>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
class Assembler;
class VarAssembler;
// `OperationBuffer` is a growable, Zone-allocated buffer to store TurboShaft
// operations. It is part of a `Graph`.
// The buffer can be seen as an array of 8-byte `OperationStorageSlot` values.
// The structure is append-only, that is, we only add operations at the end.
There are rare cases (e.g., loop phis) where we overwrite an existing
// operation, but only if we can guarantee that the new operation is not bigger
// than the operation we overwrite.
class OperationBuffer {
public:
// A `ReplaceScope` is used to overwrite an existing operation.
// It moves the end-pointer temporarily so that the next emitted operation
// overwrites an old one.
class ReplaceScope {
public:
ReplaceScope(OperationBuffer* buffer, OpIndex replaced)
: buffer_(buffer),
replaced_(replaced),
old_end_(buffer->end_),
old_slot_count_(buffer->SlotCount(replaced)) {
buffer_->end_ = buffer_->Get(replaced);
}
~ReplaceScope() {
DCHECK_LE(buffer_->SlotCount(replaced_), old_slot_count_);
buffer_->end_ = old_end_;
// Preserve the original operation size in case it has become smaller.
buffer_->operation_sizes_[replaced_.id()] = old_slot_count_;
buffer_->operation_sizes_[OpIndex(replaced_.offset() +
static_cast<uint32_t>(old_slot_count_) *
sizeof(OperationStorageSlot))
.id() -
1] = old_slot_count_;
}
ReplaceScope(const ReplaceScope&) = delete;
ReplaceScope& operator=(const ReplaceScope&) = delete;
private:
OperationBuffer* buffer_;
OpIndex replaced_;
OperationStorageSlot* old_end_;
uint16_t old_slot_count_;
};
explicit OperationBuffer(Zone* zone, size_t initial_capacity) : zone_(zone) {
begin_ = end_ = zone_->NewArray<OperationStorageSlot>(initial_capacity);
operation_sizes_ =
zone_->NewArray<uint16_t>((initial_capacity + 1) / kSlotsPerId);
end_cap_ = begin_ + initial_capacity;
}
OperationStorageSlot* Allocate(size_t slot_count) {
if (V8_UNLIKELY(static_cast<size_t>(end_cap_ - end_) < slot_count)) {
Grow(capacity() + slot_count);
DCHECK(slot_count <= static_cast<size_t>(end_cap_ - end_));
}
OperationStorageSlot* result = end_;
end_ += slot_count;
OpIndex idx = Index(result);
// Store the size for both the first and the last id corresponding to the
// new operation. This enables iteration in both directions. The two ids
// are the same if the operation is small.
operation_sizes_[idx.id()] = slot_count;
operation_sizes_[OpIndex(idx.offset() + static_cast<uint32_t>(slot_count) *
sizeof(OperationStorageSlot))
.id() -
1] = slot_count;
return result;
}
void RemoveLast() {
size_t slot_count = operation_sizes_[EndIndex().id() - 1];
end_ -= slot_count;
DCHECK_GE(end_, begin_);
}
OpIndex Index(const Operation& op) const {
return Index(reinterpret_cast<const OperationStorageSlot*>(&op));
}
OpIndex Index(const OperationStorageSlot* ptr) const {
DCHECK(begin_ <= ptr && ptr <= end_);
return OpIndex(static_cast<uint32_t>(reinterpret_cast<Address>(ptr) -
reinterpret_cast<Address>(begin_)));
}
OperationStorageSlot* Get(OpIndex idx) {
DCHECK_LT(idx.offset() / sizeof(OperationStorageSlot), size());
return reinterpret_cast<OperationStorageSlot*>(
reinterpret_cast<Address>(begin_) + idx.offset());
}
uint16_t SlotCount(OpIndex idx) {
DCHECK_LT(idx.offset() / sizeof(OperationStorageSlot), size());
return operation_sizes_[idx.id()];
}
const OperationStorageSlot* Get(OpIndex idx) const {
DCHECK_LT(idx.offset(), capacity() * sizeof(OperationStorageSlot));
return reinterpret_cast<const OperationStorageSlot*>(
reinterpret_cast<Address>(begin_) + idx.offset());
}
OpIndex Next(OpIndex idx) const {
DCHECK_GT(operation_sizes_[idx.id()], 0);
OpIndex result = OpIndex(idx.offset() + operation_sizes_[idx.id()] *
sizeof(OperationStorageSlot));
DCHECK_LE(result.offset(), capacity() * sizeof(OperationStorageSlot));
return result;
}
OpIndex Previous(OpIndex idx) const {
DCHECK_GT(idx.id(), 0);
DCHECK_GT(operation_sizes_[idx.id() - 1], 0);
OpIndex result = OpIndex(idx.offset() - operation_sizes_[idx.id() - 1] *
sizeof(OperationStorageSlot));
DCHECK_LT(result.offset(), capacity() * sizeof(OperationStorageSlot));
return result;
}
// Offset of the first operation.
OpIndex BeginIndex() const { return OpIndex(0); }
// One-past-the-end offset.
OpIndex EndIndex() const { return Index(end_); }
uint32_t size() const { return static_cast<uint32_t>(end_ - begin_); }
uint32_t capacity() const { return static_cast<uint32_t>(end_cap_ - begin_); }
void Grow(size_t min_capacity) {
size_t size = this->size();
size_t capacity = this->capacity();
size_t new_capacity = 2 * capacity;
while (new_capacity < min_capacity) new_capacity *= 2;
CHECK_LT(new_capacity, std::numeric_limits<uint32_t>::max() /
sizeof(OperationStorageSlot));
OperationStorageSlot* new_buffer =
zone_->NewArray<OperationStorageSlot>(new_capacity);
memcpy(new_buffer, begin_, size * sizeof(OperationStorageSlot));
uint16_t* new_operation_sizes =
zone_->NewArray<uint16_t>(new_capacity / kSlotsPerId);
memcpy(new_operation_sizes, operation_sizes_,
size / kSlotsPerId * sizeof(uint16_t));
begin_ = new_buffer;
end_ = new_buffer + size;
end_cap_ = new_buffer + new_capacity;
operation_sizes_ = new_operation_sizes;
}
void Reset() { end_ = begin_; }
private:
Zone* zone_;
OperationStorageSlot* begin_;
OperationStorageSlot* end_;
OperationStorageSlot* end_cap_;
uint16_t* operation_sizes_;
};
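// Usage sketch (illustrative only, not part of the original CL): a forward
// walk over all operations advances via the recorded sizes.
//
//   for (OpIndex idx = buffer.BeginIndex(); idx != buffer.EndIndex();
//        idx = buffer.Next(idx)) {
//     OperationStorageSlot* slots = buffer.Get(idx);
//     // ... interpret `slots` as the Operation stored there ...
//   }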
// A basic block
class Block {
public:
enum class Kind : uint8_t { kMerge, kLoopHeader, kBranchTarget };
bool IsLoopOrMerge() const { return IsLoop() || IsMerge(); }
bool IsLoop() const { return kind_ == Kind::kLoopHeader; }
bool IsMerge() const { return kind_ == Kind::kMerge; }
bool IsHandler() const { return false; }
bool IsSwitchCase() const { return false; }
Kind kind() const { return kind_; }
BlockIndex index() const { return index_; }
bool IsDeferred() const { return deferred_; }
void SetDeferred(bool deferred) { deferred_ = deferred; }
bool Contains(OpIndex op_idx) const {
return begin_ <= op_idx && op_idx < end_;
}
bool IsBound() const { return index_ != BlockIndex::Invalid(); }
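// Predecessors are threaded through the Block objects themselves as an
// intrusive singly-linked list via `neighboring_predecessor_`, newest first;
// Predecessors() below restores insertion order by reversing.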
void AddPredecessor(Block* predecessor) {
DCHECK(!IsBound() ||
(Predecessors().size() == 1 && kind_ == Kind::kLoopHeader));
DCHECK_EQ(predecessor->neighboring_predecessor_, nullptr);
predecessor->neighboring_predecessor_ = last_predecessor_;
last_predecessor_ = predecessor;
}
base::SmallVector<Block*, 8> Predecessors() const {
base::SmallVector<Block*, 8> result;
for (Block* pred = last_predecessor_; pred != nullptr;
pred = pred->neighboring_predecessor_) {
result.push_back(pred);
}
std::reverse(result.begin(), result.end());
return result;
}
bool HasPredecessors() const { return last_predecessor_ != nullptr; }
OpIndex begin() const {
DCHECK(begin_.valid());
return begin_;
}
OpIndex end() const {
DCHECK(end_.valid());
return end_;
}
explicit Block(Kind kind) : kind_(kind) {}
private:
friend class Graph;
Kind kind_;
bool deferred_ = false;
OpIndex begin_ = OpIndex::Invalid();
OpIndex end_ = OpIndex::Invalid();
BlockIndex index_ = BlockIndex::Invalid();
Block* last_predecessor_ = nullptr;
Block* neighboring_predecessor_ = nullptr;
#ifdef DEBUG
Graph* graph_ = nullptr;
#endif
};
class Graph {
public:
// A large initial capacity avoids repeated growth steps. It also makes sense
// because the graph and its memory are recycled for the following phases.
explicit Graph(Zone* graph_zone, size_t initial_capacity = 2048)
: operations_(graph_zone, initial_capacity),
bound_blocks_(graph_zone),
all_blocks_(graph_zone),
graph_zone_(graph_zone) {}
// Reset the graph to recycle its memory.
void Reset() {
operations_.Reset();
bound_blocks_.clear();
next_block_ = 0;
}
const Operation& Get(OpIndex i) const {
// `Operation` contains const fields and can be overwritten with placement
// new. Therefore, std::launder is necessary to avoid undefined behavior.
const Operation* ptr =
std::launder(reinterpret_cast<const Operation*>(operations_.Get(i)));
// Detect invalid memory by checking if opcode is valid.
DCHECK_LT(OpcodeIndex(ptr->opcode), kNumberOfOpcodes);
return *ptr;
}
Operation& Get(OpIndex i) {
// `Operation` contains const fields and can be overwritten with placement
// new. Therefore, std::launder is necessary to avoid undefined behavior.
Operation* ptr =
std::launder(reinterpret_cast<Operation*>(operations_.Get(i)));
// Detect invalid memory by checking if opcode is valid.
DCHECK_LT(OpcodeIndex(ptr->opcode), kNumberOfOpcodes);
return *ptr;
}
const Block& StartBlock() const { return Get(BlockIndex(0)); }
Block& Get(BlockIndex i) {
DCHECK_LT(i.id(), bound_blocks_.size());
return *bound_blocks_[i.id()];
}
const Block& Get(BlockIndex i) const {
DCHECK_LT(i.id(), bound_blocks_.size());
return *bound_blocks_[i.id()];
}
OpIndex Index(const Operation& op) const { return operations_.Index(op); }
OperationStorageSlot* Allocate(size_t slot_count) {
return operations_.Allocate(slot_count);
}
void RemoveLast() { operations_.RemoveLast(); }
template <class Op, class... Args>
V8_INLINE OpIndex Add(Args... args) {
OpIndex result = next_operation_index();
Op& op = Op::New(this, args...);
USE(op);
DCHECK_EQ(result, Index(op));
#ifdef DEBUG
for (OpIndex input : op.inputs()) {
DCHECK_LT(input, result);
}
#endif // DEBUG
return result;
}
template <class Op, class... Args>
void Replace(OpIndex replaced, Args... args) {
STATIC_ASSERT((std::is_base_of<Operation, Op>::value));
STATIC_ASSERT(std::is_trivially_destructible<Op>::value);
OperationBuffer::ReplaceScope replace_scope(&operations_, replaced);
Op::New(this, args...);
}
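// Usage sketch (illustrative; `MyOp` is a hypothetical operation type whose
// constructor takes two inputs):
//
//   OpIndex idx = graph.Add<MyOp>(left, right);
//   // Later, e.g. during lowering, overwrite the operation in place:
//   graph.Replace<MyOp>(idx, left, other);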
V8_INLINE Block* NewBlock(Block::Kind kind) {
if (V8_UNLIKELY(next_block_ == all_blocks_.size())) {
constexpr size_t new_block_count = 64;
Block* blocks = graph_zone_->NewArray<Block>(new_block_count);
for (size_t i = 0; i < new_block_count; ++i) {
all_blocks_.push_back(&blocks[i]);
}
}
Block* result = all_blocks_[next_block_++];
*result = Block(kind);
#ifdef DEBUG
result->graph_ = this;
#endif
return result;
}
bool Add(Block* block) {
DCHECK_EQ(block->graph_, this);
if (!bound_blocks_.empty() && !block->HasPredecessors()) return false;
bool deferred = true;
for (Block* pred = block->last_predecessor_; pred != nullptr;
pred = pred->neighboring_predecessor_) {
if (!pred->IsDeferred()) {
deferred = false;
break;
}
}
block->SetDeferred(deferred);
DCHECK(!block->begin_.valid());
block->begin_ = next_operation_index();
DCHECK_EQ(block->index_, BlockIndex::Invalid());
block->index_ = BlockIndex(static_cast<uint32_t>(bound_blocks_.size()));
bound_blocks_.push_back(block);
return true;
}
void Finalize(Block* block) {
DCHECK(!block->end_.valid());
block->end_ = next_operation_index();
}
OpIndex next_operation_index() const { return operations_.EndIndex(); }
Zone* graph_zone() const { return graph_zone_; }
uint32_t block_count() const {
return static_cast<uint32_t>(bound_blocks_.size());
}
uint32_t op_id_count() const {
return (operations_.size() + (kSlotsPerId - 1)) / kSlotsPerId;
}
uint32_t op_id_capacity() const {
return operations_.capacity() / kSlotsPerId;
}
template <class OperationT, typename GraphT>
class OperationIterator
: public base::iterator<std::bidirectional_iterator_tag, OperationT> {
public:
static_assert(std::is_same_v<std::remove_const_t<OperationT>, Operation> &&
std::is_same_v<std::remove_const_t<GraphT>, Graph>);
using value_type = OperationT;
explicit OperationIterator(OpIndex index, GraphT* graph)
: index_(index), graph_(graph) {}
value_type& operator*() { return graph_->Get(index_); }
OperationIterator& operator++() {
index_ = graph_->operations_.Next(index_);
return *this;
}
OperationIterator& operator--() {
index_ = graph_->operations_.Previous(index_);
return *this;
}
bool operator!=(OperationIterator other) const {
DCHECK_EQ(graph_, other.graph_);
return index_ != other.index_;
}
bool operator==(OperationIterator other) const { return !(*this != other); }
OpIndex Index() const { return index_; }
private:
OpIndex index_;
GraphT* const graph_;
};
using MutableOperationIterator = OperationIterator<Operation, Graph>;
using ConstOperationIterator =
OperationIterator<const Operation, const Graph>;
base::iterator_range<MutableOperationIterator> AllOperations() {
return operations(operations_.BeginIndex(), operations_.EndIndex());
}
base::iterator_range<ConstOperationIterator> AllOperations() const {
return operations(operations_.BeginIndex(), operations_.EndIndex());
}
base::iterator_range<MutableOperationIterator> operations(
const Block& block) {
return operations(block.begin_, block.end_);
}
base::iterator_range<ConstOperationIterator> operations(
const Block& block) const {
return operations(block.begin_, block.end_);
}
base::iterator_range<ConstOperationIterator> operations(OpIndex begin,
OpIndex end) const {
return {ConstOperationIterator(begin, this),
ConstOperationIterator(end, this)};
}
base::iterator_range<MutableOperationIterator> operations(OpIndex begin,
OpIndex end) {
return {MutableOperationIterator(begin, this),
MutableOperationIterator(end, this)};
}
base::iterator_range<base::DerefPtrIterator<Block>> blocks() {
return {
base::DerefPtrIterator(bound_blocks_.data()),
base::DerefPtrIterator(bound_blocks_.data() + bound_blocks_.size())};
}
base::iterator_range<base::DerefPtrIterator<const Block>> blocks() const {
return {base::DerefPtrIterator<const Block>(bound_blocks_.data()),
base::DerefPtrIterator<const Block>(bound_blocks_.data() +
bound_blocks_.size())};
}
bool IsValid(OpIndex i) const { return i < next_operation_index(); }
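// The companion graph is a second Graph sharing the same zone. A phase
// typically reads from the current graph while emitting into the companion,
// and finally swaps the two (see SwapWithCompanion() below).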
Graph& GetOrCreateCompanion() {
if (!companion_) {
companion_ = std::make_unique<Graph>(graph_zone_, operations_.size());
}
return *companion_;
}
// Swap the graph with its companion graph to turn the output of one phase
// into the input of the next phase.
void SwapWithCompanion() {
Graph& companion = GetOrCreateCompanion();
std::swap(operations_, companion.operations_);
std::swap(bound_blocks_, companion.bound_blocks_);
std::swap(all_blocks_, companion.all_blocks_);
std::swap(next_block_, companion.next_block_);
std::swap(graph_zone_, companion.graph_zone_);
}
private:
bool InputsValid(const Operation& op) const {
for (OpIndex i : op.inputs()) {
if (!IsValid(i)) return false;
}
return true;
}
OperationBuffer operations_;
ZoneVector<Block*> bound_blocks_;
ZoneVector<Block*> all_blocks_;
size_t next_block_ = 0;
Zone* graph_zone_;
std::unique_ptr<Graph> companion_ = {};
};
V8_INLINE OperationStorageSlot* AllocateOpStorage(Graph* graph,
size_t slot_count) {
return graph->Allocate(slot_count);
}
struct PrintAsBlockHeader {
const Block& block;
};
std::ostream& operator<<(std::ostream& os, PrintAsBlockHeader block);
std::ostream& operator<<(std::ostream& os, const Graph& graph);
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_GRAPH_H_


@ -0,0 +1,345 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/turboshaft/operations.h"
#include <atomic>
#include <sstream>
#include "src/base/platform/platform.h"
#include "src/common/assert-scope.h"
#include "src/compiler/frame-states.h"
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/handles/handles-inl.h"
namespace v8::internal::compiler::turboshaft {
const char* OpcodeName(Opcode opcode) {
#define OPCODE_NAME(Name) #Name,
static const char* const table[kNumberOfOpcodes] = {
TURBOSHAFT_OPERATION_LIST(OPCODE_NAME)};
#undef OPCODE_NAME
return table[OpcodeIndex(opcode)];
}
std::ostream& operator<<(std::ostream& os, OperationPrintStyle styled_op) {
const Operation& op = styled_op.op;
os << OpcodeName(op.opcode) << "(";
bool first = true;
for (OpIndex input : op.inputs()) {
if (!first) os << ", ";
first = false;
os << styled_op.op_index_prefix << input.id();
}
os << ")";
switch (op.opcode) {
#define SWITCH_CASE(Name) \
case Opcode::k##Name: \
op.Cast<Name##Op>().PrintOptions(os); \
break;
TURBOSHAFT_OPERATION_LIST(SWITCH_CASE)
#undef SWITCH_CASE
}
return os;
}
std::ostream& operator<<(std::ostream& os, FloatUnaryOp::Kind kind) {
switch (kind) {
case FloatUnaryOp::Kind::kAbs:
return os << "Abs";
case FloatUnaryOp::Kind::kNegate:
return os << "Negate";
case FloatUnaryOp::Kind::kSilenceNaN:
return os << "SilenceNaN";
}
}
std::ostream& operator<<(std::ostream& os, ShiftOp::Kind kind) {
switch (kind) {
case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
return os << "ShiftRightArithmeticShiftOutZeros";
case ShiftOp::Kind::kShiftRightArithmetic:
return os << "ShiftRightArithmetic";
case ShiftOp::Kind::kShiftRightLogical:
return os << "ShiftRightLogical";
case ShiftOp::Kind::kShiftLeft:
return os << "ShiftLeft";
}
}
std::ostream& operator<<(std::ostream& os, ComparisonOp::Kind kind) {
switch (kind) {
case ComparisonOp::Kind::kSignedLessThan:
return os << "SignedLessThan";
case ComparisonOp::Kind::kSignedLessThanOrEqual:
return os << "SignedLessThanOrEqual";
case ComparisonOp::Kind::kUnsignedLessThan:
return os << "UnsignedLessThan";
case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
return os << "UnsignedLessThanOrEqual";
}
}
std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
switch (kind) {
case ChangeOp::Kind::kSignedNarrowing:
return os << "SignedNarrowing";
case ChangeOp::Kind::kUnsignedNarrowing:
return os << "UnsignedNarrowing";
case ChangeOp::Kind::kIntegerTruncate:
return os << "IntegerTruncate";
case ChangeOp::Kind::kSignedFloatTruncate:
return os << "SignedFloatTruncate";
case ChangeOp::Kind::kUnsignedFloatTruncate:
return os << "UnsignedFloatTruncate";
case ChangeOp::Kind::kSignedFloatTruncateOverflowToMin:
return os << "SignedFloatTruncateOverflowToMin";
case ChangeOp::Kind::kSignedToFloat:
return os << "SignedToFloat";
case ChangeOp::Kind::kUnsignedToFloat:
return os << "UnsignedToFloat";
case ChangeOp::Kind::kExtractHighHalf:
return os << "ExtractHighHalf";
case ChangeOp::Kind::kExtractLowHalf:
return os << "ExtractLowHalf";
case ChangeOp::Kind::kZeroExtend:
return os << "ZeroExtend";
case ChangeOp::Kind::kSignExtend:
return os << "SignExtend";
case ChangeOp::Kind::kBitcast:
return os << "Bitcast";
}
}
std::ostream& operator<<(std::ostream& os, ProjectionOp::Kind kind) {
switch (kind) {
case ProjectionOp::Kind::kOverflowBit:
return os << "overflow bit";
case ProjectionOp::Kind::kResult:
return os << "result";
}
}
void PendingLoopPhiOp::PrintOptions(std::ostream& os) const {
os << "[" << rep << ", #o" << old_backedge_index.id() << "]";
}
void ConstantOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kWord32:
os << "word32: " << static_cast<int32_t>(storage.integral);
break;
case Kind::kWord64:
os << "word64: " << static_cast<int64_t>(storage.integral);
break;
case Kind::kNumber:
os << "number: " << number();
break;
case Kind::kTaggedIndex:
os << "tagged index: " << tagged_index();
break;
case Kind::kFloat64:
os << "float64: " << float64();
break;
case Kind::kFloat32:
os << "float32: " << float32();
break;
case Kind::kExternal:
os << "external: " << external_reference();
break;
case Kind::kHeapObject:
os << "heap object: " << handle();
break;
case Kind::kCompressedHeapObject:
os << "compressed heap object: " << handle();
break;
case Kind::kDelayedString:
os << delayed_string();
break;
}
os << "]";
}
void LoadOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << ", " << loaded_rep;
if (offset != 0) os << ", offset: " << offset;
os << "]";
}
void ParameterOp::PrintOptions(std::ostream& os) const {
os << "[" << parameter_index;
if (debug_name) os << ", " << debug_name;
os << "]";
}
void IndexedLoadOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << ", " << loaded_rep;
if (element_size_log2 != 0)
os << ", element size: 2^" << int{element_size_log2};
if (offset != 0) os << ", offset: " << offset;
os << "]";
}
void StoreOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << ", " << stored_rep;
os << ", " << write_barrier;
if (offset != 0) os << ", offset: " << offset;
os << "]";
}
void IndexedStoreOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << ", " << stored_rep;
os << ", " << write_barrier;
if (element_size_log2 != 0)
os << ", element size: 2^" << int{element_size_log2};
if (offset != 0) os << ", offset: " << offset;
os << "]";
}
void FrameStateOp::PrintOptions(std::ostream& os) const {
os << "[";
os << (inlined ? "inlined" : "not inlined");
os << ", ";
os << data->frame_state_info;
os << ", state values:";
FrameStateData::Iterator it = data->iterator(state_values());
while (it.has_more()) {
os << " ";
switch (it.current_instr()) {
case FrameStateData::Instr::kInput: {
MachineType type;
OpIndex input;
it.ConsumeInput(&type, &input);
os << "#" << input.id() << "(" << type << ")";
break;
}
case FrameStateData::Instr::kUnusedRegister:
it.ConsumeUnusedRegister();
os << ".";
break;
case FrameStateData::Instr::kDematerializedObject: {
uint32_t id;
uint32_t field_count;
it.ConsumeDematerializedObject(&id, &field_count);
os << "$" << id << "(field count: " << field_count << ")";
break;
}
case FrameStateData::Instr::kDematerializedObjectReference: {
uint32_t id;
it.ConsumeDematerializedObjectReference(&id);
os << "$" << id;
break;
}
}
}
os << "]";
}
void BinopOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kAdd:
os << "add, ";
break;
case Kind::kSub:
os << "sub, ";
break;
case Kind::kMul:
os << "signed mul, ";
break;
case Kind::kBitwiseAnd:
os << "bitwise and, ";
break;
case Kind::kBitwiseOr:
os << "bitwise or, ";
break;
case Kind::kBitwiseXor:
os << "bitwise xor, ";
break;
}
os << rep;
os << "]";
}
void OverflowCheckedBinopOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kSignedAdd:
os << "signed add, ";
break;
case Kind::kSignedSub:
os << "signed sub, ";
break;
case Kind::kSignedMul:
os << "signed mul, ";
break;
}
os << rep;
os << "]";
}
std::ostream& operator<<(std::ostream& os, BlockIndex b) {
if (!b.valid()) {
return os << "<invalid block>";
}
return os << 'B' << b.id();
}
std::ostream& operator<<(std::ostream& os, const Block* b) {
return os << b->index();
}
void SwitchOp::PrintOptions(std::ostream& os) const {
os << "[";
for (const Case& c : cases) {
os << "case " << c.value << ": " << c.destination << ", ";
}
os << " default: " << default_case << "]";
}
std::string Operation::ToString() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
} // namespace v8::internal::compiler::turboshaft

File diff suppressed because it is too large


@ -0,0 +1,906 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/turboshaft/recreate-schedule.h"
#include "src/base/safe_conversions.h"
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/base/vector.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
namespace {
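// Recreates a sea-of-nodes TurboFan graph and schedule from a TurboShaft
// graph. The first four members are aggregate-initialized in
// RecreateSchedule() below; the remaining members are derived from them.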
struct ScheduleBuilder {
const Graph& input_graph;
CallDescriptor* call_descriptor;
Zone* graph_zone;
Zone* phase_zone;
const size_t node_count_estimate =
static_cast<size_t>(1.1 * input_graph.op_id_count());
Schedule* const schedule =
graph_zone->New<Schedule>(graph_zone, node_count_estimate);
compiler::Graph* const tf_graph =
graph_zone->New<compiler::Graph>(graph_zone);
compiler::MachineOperatorBuilder machine{
graph_zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()};
compiler::CommonOperatorBuilder common{graph_zone};
compiler::SimplifiedOperatorBuilder simplified{graph_zone};
compiler::BasicBlock* current_block = schedule->start();
const Block* current_input_block = nullptr;
ZoneUnorderedMap<int, Node*> parameters{phase_zone};
std::vector<BasicBlock*> blocks = {};
std::vector<Node*> nodes{input_graph.op_id_count()};
std::vector<std::pair<Node*, OpIndex>> loop_phis = {};
RecreateScheduleResult Run();
Node* MakeNode(const Operator* op, base::Vector<Node* const> inputs);
Node* MakeNode(const Operator* op, std::initializer_list<Node*> inputs) {
return MakeNode(op, base::VectorOf(inputs));
}
Node* AddNode(const Operator* op, base::Vector<Node* const> inputs);
Node* AddNode(const Operator* op, std::initializer_list<Node*> inputs) {
return AddNode(op, base::VectorOf(inputs));
}
Node* GetNode(OpIndex i) { return nodes[i.id()]; }
BasicBlock* GetBlock(const Block& block) {
return blocks[block.index().id()];
}
Node* IntPtrConstant(intptr_t value) {
return AddNode(machine.Is64() ? common.Int64Constant(value)
: common.Int32Constant(
base::checked_cast<int32_t>(value)),
{});
}
Node* IntPtrAdd(Node* a, Node* b) {
return AddNode(machine.Is64() ? machine.Int64Add() : machine.Int32Add(),
{a, b});
}
Node* IntPtrShl(Node* a, Node* b) {
return AddNode(machine.Is64() ? machine.Word64Shl() : machine.Word32Shl(),
{a, b});
}
void ProcessOperation(const Operation& op);
#define DECL_PROCESS_OPERATION(Name) Node* ProcessOperation(const Name##Op& op);
TURBOSHAFT_OPERATION_LIST(DECL_PROCESS_OPERATION)
#undef DECL_PROCESS_OPERATION
std::pair<Node*, MachineType> BuildDeoptInput(FrameStateData::Iterator* it);
Node* BuildStateValues(FrameStateData::Iterator* it, int32_t size);
Node* BuildTaggedInput(FrameStateData::Iterator* it);
};
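// MakeNode only creates a TurboFan node; AddNode additionally schedules it
// into the current basic block.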
Node* ScheduleBuilder::MakeNode(const Operator* op,
base::Vector<Node* const> inputs) {
Node* node = tf_graph->NewNodeUnchecked(op, static_cast<int>(inputs.size()),
inputs.data());
return node;
}
Node* ScheduleBuilder::AddNode(const Operator* op,
base::Vector<Node* const> inputs) {
DCHECK_NOT_NULL(current_block);
Node* node = MakeNode(op, inputs);
schedule->AddNode(current_block, node);
return node;
}
RecreateScheduleResult ScheduleBuilder::Run() {
DCHECK_GE(input_graph.block_count(), 1);
// The schedule needs to contain a dummy end block because the register
// allocator expects one. This block is not actually reachable via control
// flow. It is added here because the TurboShaft graph doesn't contain such
// a block.
blocks.reserve(input_graph.block_count() + 1);
blocks.push_back(current_block);
for (size_t i = 1; i < input_graph.block_count(); ++i) {
blocks.push_back(schedule->NewBasicBlock());
}
blocks.push_back(schedule->end());
DCHECK_EQ(blocks.size(), input_graph.block_count() + 1);
// The value output count of the start node does not actually matter.
tf_graph->SetStart(tf_graph->NewNode(common.Start(0)));
tf_graph->SetEnd(tf_graph->NewNode(common.End(0)));
for (const Block& block : input_graph.blocks()) {
current_input_block = &block;
current_block = GetBlock(block);
current_block->set_deferred(current_input_block->IsDeferred());
for (const Operation& op : input_graph.operations(block)) {
DCHECK_NOT_NULL(current_block);
ProcessOperation(op);
}
}
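// Patch the placeholder backedge inputs of loop phis, now that the
// operations of all backedges have been processed.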
for (auto& p : loop_phis) {
p.first->ReplaceInput(1, GetNode(p.second));
}
DCHECK(schedule->rpo_order()->empty());
Scheduler::ComputeSpecialRPO(phase_zone, schedule);
Scheduler::GenerateDominatorTree(schedule);
DCHECK_EQ(schedule->rpo_order()->size(), blocks.size());
return {tf_graph, schedule};
}
void ScheduleBuilder::ProcessOperation(const Operation& op) {
Node* node;
switch (op.opcode) {
#define SWITCH_CASE(Name) \
case Opcode::k##Name: \
node = ProcessOperation(op.Cast<Name##Op>()); \
break;
TURBOSHAFT_OPERATION_LIST(SWITCH_CASE)
#undef SWITCH_CASE
}
nodes[input_graph.Index(op).id()] = node;
}
Node* ScheduleBuilder::ProcessOperation(const BinopOp& op) {
const Operator* o;
switch (op.rep) {
case MachineRepresentation::kWord32:
switch (op.kind) {
case BinopOp::Kind::kAdd:
o = machine.Int32Add();
break;
case BinopOp::Kind::kSub:
o = machine.Int32Sub();
break;
case BinopOp::Kind::kMul:
o = machine.Int32Mul();
break;
case BinopOp::Kind::kBitwiseAnd:
o = machine.Word32And();
break;
case BinopOp::Kind::kBitwiseOr:
o = machine.Word32Or();
break;
case BinopOp::Kind::kBitwiseXor:
o = machine.Word32Xor();
break;
}
break;
case MachineRepresentation::kWord64:
switch (op.kind) {
case BinopOp::Kind::kAdd:
o = machine.Int64Add();
break;
case BinopOp::Kind::kSub:
o = machine.Int64Sub();
break;
case BinopOp::Kind::kMul:
o = machine.Int64Mul();
break;
case BinopOp::Kind::kBitwiseAnd:
o = machine.Word64And();
break;
case BinopOp::Kind::kBitwiseOr:
o = machine.Word64Or();
break;
case BinopOp::Kind::kBitwiseXor:
o = machine.Word64Xor();
break;
}
break;
case MachineRepresentation::kFloat32:
switch (op.kind) {
case BinopOp::Kind::kAdd:
o = machine.Float32Add();
break;
case BinopOp::Kind::kSub:
o = machine.Float32Sub();
break;
case BinopOp::Kind::kMul:
o = machine.Float32Mul();
break;
case BinopOp::Kind::kBitwiseAnd:
case BinopOp::Kind::kBitwiseOr:
case BinopOp::Kind::kBitwiseXor:
UNREACHABLE();
}
break;
case MachineRepresentation::kFloat64:
switch (op.kind) {
case BinopOp::Kind::kAdd:
o = machine.Float64Add();
break;
case BinopOp::Kind::kSub:
o = machine.Float64Sub();
break;
case BinopOp::Kind::kMul:
o = machine.Float64Mul();
break;
case BinopOp::Kind::kBitwiseAnd:
case BinopOp::Kind::kBitwiseOr:
case BinopOp::Kind::kBitwiseXor:
UNREACHABLE();
}
break;
default:
UNREACHABLE();
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
Node* ScheduleBuilder::ProcessOperation(const OverflowCheckedBinopOp& op) {
const Operator* o;
switch (op.rep) {
case MachineRepresentation::kWord32:
switch (op.kind) {
case OverflowCheckedBinopOp::Kind::kSignedAdd:
o = machine.Int32AddWithOverflow();
break;
case OverflowCheckedBinopOp::Kind::kSignedSub:
o = machine.Int32SubWithOverflow();
break;
case OverflowCheckedBinopOp::Kind::kSignedMul:
o = machine.Int32MulWithOverflow();
break;
}
break;
case MachineRepresentation::kWord64:
switch (op.kind) {
case OverflowCheckedBinopOp::Kind::kSignedAdd:
o = machine.Int64AddWithOverflow();
break;
case OverflowCheckedBinopOp::Kind::kSignedSub:
o = machine.Int64SubWithOverflow();
break;
case OverflowCheckedBinopOp::Kind::kSignedMul:
UNREACHABLE();
}
break;
default:
UNREACHABLE();
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
Node* ScheduleBuilder::ProcessOperation(const FloatUnaryOp& op) {
const Operator* o;
switch (op.kind) {
case FloatUnaryOp::Kind::kAbs:
switch (op.rep) {
case MachineRepresentation::kFloat32:
o = machine.Float32Abs();
break;
case MachineRepresentation::kFloat64:
o = machine.Float64Abs();
break;
default:
UNREACHABLE();
}
break;
case FloatUnaryOp::Kind::kNegate:
switch (op.rep) {
case MachineRepresentation::kFloat32:
o = machine.Float32Neg();
break;
case MachineRepresentation::kFloat64:
o = machine.Float64Neg();
break;
default:
UNREACHABLE();
}
break;
case FloatUnaryOp::Kind::kSilenceNaN:
DCHECK_EQ(op.rep, MachineRepresentation::kFloat64);
o = machine.Float64SilenceNaN();
break;
}
return AddNode(o, {GetNode(op.input())});
}
Node* ScheduleBuilder::ProcessOperation(const ShiftOp& op) {
const Operator* o;
switch (op.rep) {
case MachineRepresentation::kWord32:
switch (op.kind) {
case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
o = machine.Word32SarShiftOutZeros();
break;
case ShiftOp::Kind::kShiftRightArithmetic:
o = machine.Word32Sar();
break;
case ShiftOp::Kind::kShiftRightLogical:
o = machine.Word32Shr();
break;
case ShiftOp::Kind::kShiftLeft:
o = machine.Word32Shl();
break;
}
break;
case MachineRepresentation::kWord64:
switch (op.kind) {
case ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros:
o = machine.Word64SarShiftOutZeros();
break;
case ShiftOp::Kind::kShiftRightArithmetic:
o = machine.Word64Sar();
break;
case ShiftOp::Kind::kShiftRightLogical:
o = machine.Word64Shr();
break;
case ShiftOp::Kind::kShiftLeft:
o = machine.Word64Shl();
break;
}
break;
default:
UNREACHABLE();
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
Node* ScheduleBuilder::ProcessOperation(const EqualOp& op) {
const Operator* o;
switch (op.rep) {
case MachineRepresentation::kWord32:
o = machine.Word32Equal();
break;
case MachineRepresentation::kWord64:
o = machine.Word64Equal();
break;
case MachineRepresentation::kFloat32:
o = machine.Float32Equal();
break;
case MachineRepresentation::kFloat64:
o = machine.Float64Equal();
break;
default:
UNREACHABLE();
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
Node* ScheduleBuilder::ProcessOperation(const ComparisonOp& op) {
const Operator* o;
switch (op.rep) {
case MachineRepresentation::kWord32:
switch (op.kind) {
case ComparisonOp::Kind::kSignedLessThan:
o = machine.Int32LessThan();
break;
case ComparisonOp::Kind::kSignedLessThanOrEqual:
o = machine.Int32LessThanOrEqual();
break;
case ComparisonOp::Kind::kUnsignedLessThan:
o = machine.Uint32LessThan();
break;
case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
o = machine.Uint32LessThanOrEqual();
break;
}
break;
case MachineRepresentation::kWord64:
switch (op.kind) {
case ComparisonOp::Kind::kSignedLessThan:
o = machine.Int64LessThan();
break;
case ComparisonOp::Kind::kSignedLessThanOrEqual:
o = machine.Int64LessThanOrEqual();
break;
case ComparisonOp::Kind::kUnsignedLessThan:
o = machine.Uint64LessThan();
break;
case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
o = machine.Uint64LessThanOrEqual();
break;
}
break;
case MachineRepresentation::kFloat32:
switch (op.kind) {
case ComparisonOp::Kind::kSignedLessThan:
o = machine.Float32LessThan();
break;
case ComparisonOp::Kind::kSignedLessThanOrEqual:
o = machine.Float32LessThanOrEqual();
break;
case ComparisonOp::Kind::kUnsignedLessThan:
case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
UNREACHABLE();
}
break;
case MachineRepresentation::kFloat64:
switch (op.kind) {
case ComparisonOp::Kind::kSignedLessThan:
o = machine.Float64LessThan();
break;
case ComparisonOp::Kind::kSignedLessThanOrEqual:
o = machine.Float64LessThanOrEqual();
break;
case ComparisonOp::Kind::kUnsignedLessThan:
case ComparisonOp::Kind::kUnsignedLessThanOrEqual:
UNREACHABLE();
}
break;
default:
UNREACHABLE();
}
return AddNode(o, {GetNode(op.left()), GetNode(op.right())});
}
Node* ScheduleBuilder::ProcessOperation(const ChangeOp& op) {
const Operator* o;
switch (op.kind) {
using Kind = ChangeOp::Kind;
case Kind::kIntegerTruncate:
if (op.from == MachineRepresentation::kWord64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.TruncateInt64ToInt32();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedFloatTruncate:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.TruncateFloat64ToInt64(TruncateKind::kArchitectureDefault);
} else if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.RoundFloat64ToInt32();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedFloatTruncateOverflowToMin:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.TruncateFloat64ToInt64(TruncateKind::kSetOverflowToMin);
} else {
UNIMPLEMENTED();
}
break;
case Kind::kUnsignedFloatTruncate:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.TruncateFloat64ToWord32();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedToFloat:
if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kFloat64) {
o = machine.ChangeInt32ToFloat64();
} else if (op.from == MachineRepresentation::kWord64 &&
op.to == MachineRepresentation::kFloat64) {
o = machine.ChangeInt64ToFloat64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kUnsignedToFloat:
if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kFloat64) {
o = machine.ChangeUint32ToFloat64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kExtractHighHalf:
DCHECK_EQ(op.from, MachineRepresentation::kFloat64);
DCHECK_EQ(op.to, MachineRepresentation::kWord32);
o = machine.Float64ExtractHighWord32();
break;
case Kind::kExtractLowHalf:
DCHECK_EQ(op.from, MachineRepresentation::kFloat64);
DCHECK_EQ(op.to, MachineRepresentation::kWord32);
o = machine.Float64ExtractLowWord32();
break;
case Kind::kBitcast:
if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kWord64) {
o = machine.BitcastWord32ToWord64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignExtend:
if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kWord64) {
o = machine.ChangeInt32ToInt64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kZeroExtend:
if (op.from == MachineRepresentation::kWord32 &&
op.to == MachineRepresentation::kWord64) {
o = machine.ChangeUint32ToUint64();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kSignedNarrowing:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.ChangeFloat64ToInt64();
} else if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.ChangeFloat64ToInt32();
} else {
UNIMPLEMENTED();
}
break;
case Kind::kUnsignedNarrowing:
if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord64) {
o = machine.ChangeFloat64ToUint64();
} else if (op.from == MachineRepresentation::kFloat64 &&
op.to == MachineRepresentation::kWord32) {
o = machine.ChangeFloat64ToUint32();
} else {
UNIMPLEMENTED();
}
break;
}
return AddNode(o, {GetNode(op.input())});
}
Node* ScheduleBuilder::ProcessOperation(const TaggedBitcastOp& op) {
const Operator* o;
if (op.from == MachineRepresentation::kTagged &&
op.to == MachineType::PointerRepresentation()) {
o = machine.BitcastTaggedToWord();
} else if (op.from == MachineType::PointerRepresentation() &&
op.to == MachineRepresentation::kTagged) {
o = machine.BitcastWordToTagged();
} else {
UNIMPLEMENTED();
}
return AddNode(o, {GetNode(op.input())});
}
Node* ScheduleBuilder::ProcessOperation(const PendingLoopPhiOp& op) {
UNREACHABLE();
}
Node* ScheduleBuilder::ProcessOperation(const ConstantOp& op) {
switch (op.kind) {
case ConstantOp::Kind::kWord32:
return AddNode(common.Int32Constant(static_cast<int32_t>(op.word32())),
{});
case ConstantOp::Kind::kWord64:
return AddNode(common.Int64Constant(static_cast<int64_t>(op.word64())),
{});
case ConstantOp::Kind::kExternal:
return AddNode(common.ExternalConstant(op.external_reference()), {});
case ConstantOp::Kind::kHeapObject:
return AddNode(common.HeapConstant(op.handle()), {});
case ConstantOp::Kind::kCompressedHeapObject:
return AddNode(common.CompressedHeapConstant(op.handle()), {});
case ConstantOp::Kind::kNumber:
return AddNode(common.NumberConstant(op.number()), {});
case ConstantOp::Kind::kTaggedIndex:
return AddNode(common.TaggedIndexConstant(op.tagged_index()), {});
case ConstantOp::Kind::kFloat64:
return AddNode(common.Float64Constant(op.float64()), {});
case ConstantOp::Kind::kFloat32:
return AddNode(common.Float32Constant(op.float32()), {});
case ConstantOp::Kind::kDelayedString:
return AddNode(common.DelayedStringConstant(op.delayed_string()), {});
}
}
Node* ScheduleBuilder::ProcessOperation(const LoadOp& op) {
intptr_t offset = op.offset;
if (op.kind == LoadOp::Kind::kOnHeap) {
CHECK_GE(offset, std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
Node* base = GetNode(op.base());
return AddNode(machine.Load(op.loaded_rep), {base, IntPtrConstant(offset)});
}
Node* ScheduleBuilder::ProcessOperation(const IndexedLoadOp& op) {
intptr_t offset = op.offset;
if (op.kind == IndexedLoadOp::Kind::kOnHeap) {
CHECK_GE(offset, std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
Node* base = GetNode(op.base());
Node* index = GetNode(op.index());
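// The machine-level Load operator only takes (base, index) inputs, so fold
// the element shift and the constant offset into the index.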
if (op.element_size_log2 != 0) {
index = IntPtrShl(index, IntPtrConstant(op.element_size_log2));
}
if (offset != 0) {
index = IntPtrAdd(index, IntPtrConstant(offset));
}
return AddNode(machine.Load(op.loaded_rep), {base, index});
}
Node* ScheduleBuilder::ProcessOperation(const StoreOp& op) {
intptr_t offset = op.offset;
if (op.kind == StoreOp::Kind::kOnHeap) {
CHECK(offset >= std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
Node* base = GetNode(op.base());
Node* value = GetNode(op.value());
return AddNode(
machine.Store(StoreRepresentation(op.stored_rep, op.write_barrier)),
{base, IntPtrConstant(offset), value});
}
Node* ScheduleBuilder::ProcessOperation(const IndexedStoreOp& op) {
intptr_t offset = op.offset;
if (op.kind == IndexedStoreOp::Kind::kOnHeap) {
CHECK(offset >= std::numeric_limits<int32_t>::min() + kHeapObjectTag);
offset -= kHeapObjectTag;
}
Node* base = GetNode(op.base());
Node* index = GetNode(op.index());
Node* value = GetNode(op.value());
if (op.element_size_log2 != 0) {
index = IntPtrShl(index, IntPtrConstant(op.element_size_log2));
}
if (offset != 0) {
index = IntPtrAdd(index, IntPtrConstant(offset));
}
return AddNode(
machine.Store(StoreRepresentation(op.stored_rep, op.write_barrier)),
{base, index, value});
}
Node* ScheduleBuilder::ProcessOperation(const ParameterOp& op) {
// Parameters need to be cached because the register allocator assumes that
// there are no duplicate nodes for the same parameter.
if (parameters.count(op.parameter_index)) {
return parameters[op.parameter_index];
}
Node* parameter = MakeNode(
common.Parameter(static_cast<int>(op.parameter_index), op.debug_name),
{tf_graph->start()});
schedule->AddNode(schedule->start(), parameter);
parameters[op.parameter_index] = parameter;
return parameter;
}
Node* ScheduleBuilder::ProcessOperation(const GotoOp& op) {
schedule->AddGoto(current_block, blocks[op.destination->index().id()]);
current_block = nullptr;
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const StackPointerGreaterThanOp& op) {
return AddNode(machine.StackPointerGreaterThan(op.kind),
{GetNode(op.stack_limit())});
}
Node* ScheduleBuilder::ProcessOperation(const LoadStackCheckOffsetOp& op) {
return AddNode(machine.LoadStackCheckOffset(), {});
}
Node* ScheduleBuilder::ProcessOperation(const CheckLazyDeoptOp& op) {
Node* call = GetNode(op.call());
Node* frame_state = GetNode(op.frame_state());
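// A lazy deopt point is represented by appending the frame state as an
// additional input to the call node itself.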
call->AppendInput(graph_zone, frame_state);
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const DeoptimizeIfOp& op) {
Node* condition = GetNode(op.condition());
Node* frame_state = GetNode(op.frame_state());
const Operator* o = op.negated
? common.DeoptimizeUnless(op.parameters->reason(),
op.parameters->feedback())
: common.DeoptimizeIf(op.parameters->reason(),
op.parameters->feedback());
return AddNode(o, {condition, frame_state});
}
Node* ScheduleBuilder::ProcessOperation(const DeoptimizeOp& op) {
Node* frame_state = GetNode(op.frame_state());
const Operator* o =
common.Deoptimize(op.parameters->reason(), op.parameters->feedback());
Node* node = MakeNode(o, {frame_state});
schedule->AddDeoptimize(current_block, node);
current_block = nullptr;
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const PhiOp& op) {
if (current_input_block->IsLoop()) {
DCHECK_EQ(op.input_count, 2);
Node* input = GetNode(op.input(0));
// The second `input` is a placeholder that is patched when we process the
// backedge.
Node* node = AddNode(common.Phi(op.rep, 2), {input, input});
loop_phis.emplace_back(node, op.input(1));
return node;
} else {
base::SmallVector<Node*, 8> inputs;
for (OpIndex i : op.inputs()) {
inputs.push_back(GetNode(i));
}
return AddNode(common.Phi(op.rep, op.input_count), base::VectorOf(inputs));
}
}
Node* ScheduleBuilder::ProcessOperation(const ProjectionOp& op) {
switch (op.kind) {
case ProjectionOp::Kind::kOverflowBit:
return AddNode(common.Projection(1), {GetNode(op.input())});
case ProjectionOp::Kind::kResult:
return AddNode(common.Projection(0), {GetNode(op.input())});
}
}
std::pair<Node*, MachineType> ScheduleBuilder::BuildDeoptInput(
FrameStateData::Iterator* it) {
switch (it->current_instr()) {
using Instr = FrameStateData::Instr;
case Instr::kInput: {
MachineType type;
OpIndex input;
it->ConsumeInput(&type, &input);
return {GetNode(input), type};
}
case Instr::kDematerializedObject: {
uint32_t obj_id;
uint32_t field_count;
it->ConsumeDematerializedObject(&obj_id, &field_count);
base::SmallVector<Node*, 16> fields;
ZoneVector<MachineType>& field_types =
*tf_graph->zone()->New<ZoneVector<MachineType>>(field_count,
tf_graph->zone());
for (uint32_t i = 0; i < field_count; ++i) {
std::pair<Node*, MachineType> p = BuildDeoptInput(it);
fields.push_back(p.first);
field_types[i] = p.second;
}
return {AddNode(common.TypedObjectState(obj_id, &field_types),
base::VectorOf(fields)),
MachineType::TaggedPointer()};
}
case Instr::kDematerializedObjectReference: {
uint32_t obj_id;
it->ConsumeDematerializedObjectReference(&obj_id);
return {AddNode(common.ObjectId(obj_id), {}),
MachineType::TaggedPointer()};
}
case Instr::kUnusedRegister:
UNREACHABLE();
}
}
// Create a mostly balanced tree of `StateValues` nodes.
Node* ScheduleBuilder::BuildStateValues(FrameStateData::Iterator* it,
int32_t size) {
constexpr int32_t kMaxStateValueInputCount = 8;
base::SmallVector<Node*, kMaxStateValueInputCount> inputs;
base::SmallVector<MachineType, kMaxStateValueInputCount> types;
SparseInputMask::BitMaskType input_mask = 0;
int32_t child_size =
(size + kMaxStateValueInputCount - 1) / kMaxStateValueInputCount;
// `state_value_inputs` counts the number of inputs used for the current
// `StateValues` node. It is gradually adjusted as nodes are shifted to lower
// levels in the tree.
int32_t state_value_inputs = size;
int32_t mask_size = 0;
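// Worked example (illustrative): with size == 20, child_size == 3; the loop
// emits six nested StateValues nodes of three leaf inputs each, followed by
// two direct leaf inputs, i.e. eight inputs at this level.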
for (int32_t i = 0; i < state_value_inputs; ++i) {
DCHECK_LT(i, kMaxStateValueInputCount);
++mask_size;
if (state_value_inputs <= kMaxStateValueInputCount) {
// All the remaining inputs fit at the current level.
if (it->current_instr() == FrameStateData::Instr::kUnusedRegister) {
it->ConsumeUnusedRegister();
} else {
std::pair<Node*, MachineType> p = BuildDeoptInput(it);
input_mask |= SparseInputMask::BitMaskType{1} << i;
inputs.push_back(p.first);
types.push_back(p.second);
}
} else {
// We have too many inputs, so recursively create another `StateValues`
// node.
input_mask |= SparseInputMask::BitMaskType{1} << i;
int32_t actual_child_size = std::min(child_size, state_value_inputs - i);
inputs.push_back(BuildStateValues(it, actual_child_size));
// This is a dummy type that shouldn't matter.
types.push_back(MachineType::AnyTagged());
// `actual_child_size`-many inputs were shifted to the next level, replaced
// by a single `StateValues` node at this level.
state_value_inputs = state_value_inputs - actual_child_size + 1;
}
}
input_mask |= SparseInputMask::kEndMarker << mask_size;
return AddNode(
common.TypedStateValues(graph_zone->New<ZoneVector<MachineType>>(
types.begin(), types.end(), graph_zone),
SparseInputMask(input_mask)),
base::VectorOf(inputs));
}
Node* ScheduleBuilder::BuildTaggedInput(FrameStateData::Iterator* it) {
std::pair<Node*, MachineType> p = BuildDeoptInput(it);
DCHECK(p.second.IsTagged());
return p.first;
}
Node* ScheduleBuilder::ProcessOperation(const FrameStateOp& op) {
const FrameStateInfo& info = op.data->frame_state_info;
auto it = op.data->iterator(op.state_values());
Node* parameter_state_values = BuildStateValues(&it, info.parameter_count());
Node* register_state_values = BuildStateValues(&it, info.local_count());
Node* accumulator_state_values = BuildStateValues(&it, info.stack_count());
Node* context = BuildTaggedInput(&it);
Node* closure = BuildTaggedInput(&it);
Node* parent =
op.inlined ? GetNode(op.parent_frame_state()) : tf_graph->start();
return AddNode(common.FrameState(info.bailout_id(), info.state_combine(),
info.function_info()),
{parameter_state_values, register_state_values,
accumulator_state_values, context, closure, parent});
}
Node* ScheduleBuilder::ProcessOperation(const CallOp& op) {
base::SmallVector<Node*, 16> inputs;
inputs.push_back(GetNode(op.callee()));
for (OpIndex i : op.arguments()) {
inputs.push_back(GetNode(i));
}
return AddNode(common.Call(op.descriptor), base::VectorOf(inputs));
}
Node* ScheduleBuilder::ProcessOperation(const UnreachableOp& op) {
Node* node = MakeNode(common.Throw(), {});
schedule->AddThrow(current_block, node);
current_block = nullptr;
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const ReturnOp& op) {
Node* pop_count = AddNode(common.Int32Constant(op.pop_count), {});
base::SmallVector<Node*, 8> inputs = {pop_count};
for (OpIndex i : op.return_values()) {
inputs.push_back(GetNode(i));
}
Node* node =
MakeNode(common.Return(static_cast<int>(op.return_values().size())),
base::VectorOf(inputs));
schedule->AddReturn(current_block, node);
current_block = nullptr;
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const BranchOp& op) {
Node* branch =
MakeNode(common.Branch(BranchHint::kNone), {GetNode(op.condition())});
BasicBlock* true_block = GetBlock(*op.if_true);
BasicBlock* false_block = GetBlock(*op.if_false);
schedule->AddBranch(current_block, branch, true_block, false_block);
true_block->AddNode(MakeNode(common.IfTrue(), {branch}));
false_block->AddNode(MakeNode(common.IfFalse(), {branch}));
current_block = nullptr;
return nullptr;
}
Node* ScheduleBuilder::ProcessOperation(const SwitchOp& op) {
size_t succ_count = op.cases.size() + 1;
Node* switch_node =
MakeNode(common.Switch(succ_count), {GetNode(op.input())});
base::SmallVector<BasicBlock*, 16> successors;
for (SwitchOp::Case c : op.cases) {
BasicBlock* case_block = GetBlock(*c.destination);
successors.push_back(case_block);
Node* case_node = MakeNode(common.IfValue(c.value), {switch_node});
schedule->AddNode(case_block, case_node);
}
BasicBlock* default_block = GetBlock(*op.default_case);
successors.push_back(default_block);
schedule->AddNode(default_block, MakeNode(common.IfDefault(), {switch_node}));
schedule->AddSwitch(current_block, switch_node, successors.data(),
successors.size());
current_block = nullptr;
return nullptr;
}
} // namespace
RecreateScheduleResult RecreateSchedule(const Graph& graph,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone) {
return ScheduleBuilder{graph, call_descriptor, graph_zone, phase_zone}.Run();
}
} // namespace v8::internal::compiler::turboshaft


@ -0,0 +1,30 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
#define V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
namespace v8::internal {
class Zone;
}
namespace v8::internal::compiler {
class Schedule;
class Graph;
class CallDescriptor;
} // namespace v8::internal::compiler
namespace v8::internal::compiler::turboshaft {
class Graph;
struct RecreateScheduleResult {
compiler::Graph* graph;
Schedule* schedule;
};
RecreateScheduleResult RecreateSchedule(const Graph& graph,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone);
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_


@ -946,6 +946,8 @@ DEFINE_FLOAT(script_delay_fraction, 0.0,
"busy wait after each Script::Run by the given fraction of the "
"run's duration")
DEFINE_BOOL(turboshaft, false, "enable TurboFan's TurboShaft phases")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "


@ -368,6 +368,8 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifiedLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TraceScheduleAndVerify) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildTurboShaft) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TurboshaftRecreateSchedule) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \


@ -7,6 +7,7 @@
#include <deque>
#include <forward_list>
#include <initializer_list>
#include <list>
#include <map>
#include <queue>
@ -46,6 +47,11 @@ class ZoneVector : public std::vector<T, ZoneAllocator<T>> {
ZoneVector(std::initializer_list<T> list, Zone* zone)
: std::vector<T, ZoneAllocator<T>>(list, ZoneAllocator<T>(zone)) {}
ZoneVector& operator=(std::initializer_list<T> ilist) {
std::vector<T, ZoneAllocator<T>>::operator=(ilist);
return *this;
}
// Constructs a new vector and fills it with the contents of the range
// [first, last).
template <class InputIt>


@ -6,8 +6,11 @@
#define V8_ZONE_ZONE_H_
#include <limits>
#include <memory>
#include <type_traits>
#include "src/base/logging.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/utils/utils.h"
#include "src/zone/accounting-allocator.h"
@ -121,6 +124,20 @@ class V8_EXPORT_PRIVATE Zone final {
return static_cast<T*>(Allocate<TypeTag>(length * sizeof(T)));
}
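// Allocates `length` copies of `value` in the zone and returns them as a
// Vector.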
template <typename T, typename TypeTag = T[]>
base::Vector<T> NewVector(size_t length, T value) {
T* new_array = NewArray<T, TypeTag>(length);
std::uninitialized_fill_n(new_array, length, value);
return {new_array, length};
}
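// Returns a zone-allocated copy of `v`, dropping constness of the element
// type.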
template <typename T, typename TypeTag = std::remove_const_t<T>[]>
base::Vector<std::remove_const_t<T>> CloneVector(base::Vector<T> v) {
auto* new_array = NewArray<std::remove_const_t<T>, TypeTag>(v.size());
std::uninitialized_copy(v.begin(), v.end(), new_array);
return {new_array, v.size()};
}
// Return array of 'length' elements back to Zone. These bytes can be reused
// for following allocations.
//


@ -269,6 +269,11 @@
# Needs deterministic test helpers for concurrent maglev tiering.
# TODO(jgruber,v8:7700): Implement ASAP.
'maglev/18': [SKIP],
# Stress variants produce operators that TurboShaft does not support yet.
# TODO(v8:12783)
'turboshaft/simple': [PASS, NO_VARIANTS],
}], # ALWAYS
##############################################################################


@ -0,0 +1,17 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --turboshaft --allow-natives-syntax
function f(x) {
return x.a + x.b;
}
%PrepareFunctionForOptimization(f);
assertEquals(5, f({a: 2, b: 3}));
assertEquals(7, f({a: 2, b: 5}));
%OptimizeFunctionOnNextCall(f);
assertEquals(5, f({a: 2, b: 3}));
assertEquals(7, f({a: 2, b: 5}));