[wasm-gc][turbofan] Implement typing phase

We introduce a typing phase into the Turbofan compilation pipeline for
wasm-gc. It has two functionalities: (1) to type nodes that were not
typed during code generation (mainly phi nodes) and (2) to narrow types
as much as possible.
The following nodes are handled, which should be enough for our
purposes: TypeGuard, WasmTypeCast, AssertNotNull, Phi, LoadFromObject,
and LoadImmutableFromObject.
Loop phi types are computed by first assigning the type of the
non-recursive input, then updating it once the types of the recursive
inputs are known, and repeating this process until a fixed point is
reached.

Drive-by: Remove the narrowing of function signatures during wasm
inlining, as it created some issues and should not be needed after this
series of changes.

Bug: v8:7748
Change-Id: I8a72488d5c221c4ae8257fc5abf6f0368cf10e96
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3678208
Commit-Queue: Manos Koukoutos <manoskouk@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80912}
This commit is contained in:
Manos Koukoutos 2022-06-02 10:12:57 +00:00 committed by V8 LUCI CQ
parent 97a3d69d47
commit aee0ec979a
16 changed files with 549 additions and 108 deletions

View File

@ -2880,6 +2880,8 @@ filegroup(
"src/compiler/wasm-graph-assembler.h",
"src/compiler/wasm-inlining.cc",
"src/compiler/wasm-inlining.h",
"src/compiler/wasm-typer.cc",
"src/compiler/wasm-typer.h",
],
"//conditions:default": [],
}),

View File

@ -3562,6 +3562,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/wasm-graph-assembler.h",
"src/compiler/wasm-inlining.h",
"src/compiler/wasm-loop-peeling.h",
"src/compiler/wasm-typer.h",
"src/debug/debug-wasm-objects-inl.h",
"src/debug/debug-wasm-objects.h",
"src/third_party/utf8-decoder/generalized-utf8-decoder.h",
@ -4062,6 +4063,7 @@ if (v8_enable_webassembly) {
"src/compiler/wasm-graph-assembler.cc",
"src/compiler/wasm-inlining.cc",
"src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-typer.cc",
]
}

View File

@ -15,6 +15,7 @@
#include "src/compiler/wasm-compiler.h"
// TODO(wasm): Remove this include.
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-subtyping.h"
#include "src/zone/zone.h"
namespace v8 {
@ -24,19 +25,22 @@ namespace compiler {
Int64Lowering::Int64Lowering(
Graph* graph, MachineOperatorBuilder* machine,
CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified,
Zone* zone, Signature<MachineRepresentation>* signature,
Zone* zone, const wasm::WasmModule* module,
Signature<MachineRepresentation>* signature,
std::unique_ptr<Int64LoweringSpecialCase> special_case)
: zone_(zone),
graph_(graph),
: graph_(graph),
machine_(machine),
common_(common),
simplified_(simplified),
zone_(zone),
signature_(signature),
special_case_(std::move(special_case)),
state_(graph->NodeCount(), State::kUnvisited),
stack_(zone),
replacements_(nullptr),
signature_(signature),
placeholder_(graph->NewNode(common->Dead())),
special_case_(std::move(special_case)) {
int32_type_(Type::Wasm({wasm::kWasmI32, module}, graph->zone())),
float64_type_(Type::Wasm({wasm::kWasmF64, module}, graph->zone())) {
DCHECK_NOT_NULL(graph);
DCHECK_NOT_NULL(graph->end());
replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
@ -123,6 +127,14 @@ int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {
} // namespace
void Int64Lowering::SetInt32Type(Node* node) {
NodeProperties::SetType(node, int32_type_);
}
void Int64Lowering::SetFloat64Type(Node* node) {
NodeProperties::SetType(node, float64_type_);
}
void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
DCHECK_EQ(5, node->InputCount());
LowerMemoryBaseAndIndex(node);
@ -130,12 +142,14 @@ void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
node->ReplaceInput(2, GetReplacementLow(value));
node->InsertInput(zone(), 3, GetReplacementHigh(value));
NodeProperties::ChangeOp(node, op);
SetInt32Type(node);
ReplaceNodeWithProjections(node);
}
void Int64Lowering::LowerWord64AtomicNarrowOp(Node* node, const Operator* op) {
DefaultLowering(node, true);
NodeProperties::ChangeOp(node, op);
SetInt32Type(node);
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
@ -150,13 +164,19 @@ int Int64Lowering::GetParameterCountAfterLowering(
void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
Node** index_high) {
// We want to transform constant indices into constant indices, because
// wasm-typer depends on them.
Int32Matcher m(index);
Node* index_second =
m.HasResolvedValue()
? graph()->NewNode(common()->Int32Constant(m.ResolvedValue() + 4))
: graph()->NewNode(machine()->Int32Add(), index,
graph()->NewNode(common()->Int32Constant(4)));
#if defined(V8_TARGET_LITTLE_ENDIAN)
*index_low = index;
*index_high = graph()->NewNode(machine()->Int32Add(), index,
graph()->NewNode(common()->Int32Constant(4)));
*index_high = index_second;
#elif defined(V8_TARGET_BIG_ENDIAN)
*index_low = graph()->NewNode(machine()->Int32Add(), index,
graph()->NewNode(common()->Int32Constant(4)));
*index_low = index_second;
*index_high = index;
#endif
}
@ -184,6 +204,8 @@ void Int64Lowering::LowerLoadOperator(Node* node, MachineRepresentation rep,
}
node->ReplaceInput(1, index_low);
NodeProperties::ChangeOp(node, load_op);
SetInt32Type(node);
SetInt32Type(high_node);
ReplaceNode(node, node, high_node);
} else {
DefaultLowering(node);
@ -342,6 +364,8 @@ void Int64Lowering::LowerNode(Node* node) {
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1),
graph()->start());
SetInt32Type(node);
SetInt32Type(high_node);
ReplaceNode(node, node, high_node);
}
}
@ -426,6 +450,8 @@ void Int64Lowering::LowerNode(Node* node) {
Node* high_node =
graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
GetReplacementHigh(right));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
break;
}
@ -495,6 +521,8 @@ void Int64Lowering::LowerNode(Node* node) {
Node* high_node =
graph()->NewNode(machine()->Word32Or(), GetReplacementHigh(left),
GetReplacementHigh(right));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
break;
}
@ -509,6 +537,8 @@ void Int64Lowering::LowerNode(Node* node) {
Node* high_node =
graph()->NewNode(machine()->Word32Xor(), GetReplacementHigh(left),
GetReplacementHigh(right));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
break;
}
@ -588,6 +618,7 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementHigh(right))),
graph()->NewNode(common()->Int32Constant(0)));
SetInt32Type(replacement);
ReplaceNode(node, replacement, nullptr);
break;
}
@ -619,10 +650,11 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
// We use SAR to preserve the sign in the high word.
ReplaceNode(
node, input,
Node* high_node =
graph()->NewNode(machine()->Word32Sar(), input,
graph()->NewNode(common()->Int32Constant(31))));
graph()->NewNode(common()->Int32Constant(31)));
SetInt32Type(high_node);
ReplaceNode(node, input, high_node);
node->NullAllInputs();
break;
}
@ -646,7 +678,7 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementHigh(input));
Node* result = graph()->NewNode(machine()->Float64InsertLowWord32(),
high_half, GetReplacementLow(input));
SetFloat64Type(node);
ReplaceNode(node, result, nullptr);
break;
}
@ -661,7 +693,8 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(machine()->Float64ExtractLowWord32(), input);
Node* high_node =
graph()->NewNode(machine()->Float64ExtractHighWord32(), input);
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
break;
}
@ -713,6 +746,8 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(machine()->Word32Or(),
graph()->NewNode(op1, high_input, masked_shift),
graph()->NewNode(op2, low_input, inv_shift));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
}
} else {
@ -785,6 +820,8 @@ void Int64Lowering::LowerNode(Node* node) {
machine()->Word32Or(),
graph()->NewNode(machine()->Word32And(), rotate_high, mask1),
graph()->NewNode(machine()->Word32And(), rotate_low, mask2));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
}
break;
@ -805,6 +842,7 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementLow(input)),
graph()->NewNode(common()->Int32Constant(32))),
graph()->NewNode(machine()->Word32Clz(), GetReplacementHigh(input)));
SetInt32Type(low_node);
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
@ -826,6 +864,7 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(common()->Int32Constant(32))),
graph()->NewNode(machine()->Word32Ctz().op(),
GetReplacementLow(input)));
SetInt32Type(low_node);
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
@ -841,13 +880,14 @@ void Int64Lowering::LowerNode(Node* node) {
// We assume that a Word64Popcnt node only has been created if
// Word32Popcnt is actually supported.
DCHECK(machine()->Word32Popcnt().IsSupported());
ReplaceNode(node, graph()->NewNode(
machine()->Int32Add(),
graph()->NewNode(machine()->Word32Popcnt().op(),
GetReplacementLow(input)),
graph()->NewNode(machine()->Word32Popcnt().op(),
GetReplacementHigh(input))),
graph()->NewNode(common()->Int32Constant(0)));
Node* low_node =
graph()->NewNode(machine()->Int32Add(),
graph()->NewNode(machine()->Word32Popcnt().op(),
GetReplacementLow(input)),
graph()->NewNode(machine()->Word32Popcnt().op(),
GetReplacementHigh(input)));
SetInt32Type(low_node);
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
case IrOpcode::kPhi: {
@ -875,6 +915,8 @@ void Int64Lowering::LowerNode(Node* node) {
Node* high_node = graph()->NewNode(
common()->LoopExitValue(MachineRepresentation::kWord32),
GetReplacementHigh(node->InputAt(0)), node->InputAt(1));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
} else {
DefaultLowering(node);
@ -883,11 +925,13 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64ReverseBytes: {
Node* input = node->InputAt(0);
ReplaceNode(node,
graph()->NewNode(machine()->Word32ReverseBytes(),
GetReplacementHigh(input)),
graph()->NewNode(machine()->Word32ReverseBytes(),
GetReplacementLow(input)));
Node* low_node = graph()->NewNode(machine()->Word32ReverseBytes(),
GetReplacementHigh(input));
Node* high_node = graph()->NewNode(machine()->Word32ReverseBytes(),
GetReplacementLow(input));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
break;
}
case IrOpcode::kSignExtendWord8ToInt64: {
@ -897,13 +941,15 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
// Sign extend low node to Int32
input = graph()->NewNode(machine()->SignExtendWord8ToInt32(), input);
Node* low_node =
graph()->NewNode(machine()->SignExtendWord8ToInt32(), input);
// We use SAR to preserve the sign in the high word.
ReplaceNode(
node, input,
graph()->NewNode(machine()->Word32Sar(), input,
graph()->NewNode(common()->Int32Constant(31))));
Node* high_node =
graph()->NewNode(machine()->Word32Sar(), low_node,
graph()->NewNode(common()->Int32Constant(31)));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
node->NullAllInputs();
break;
}
@ -914,13 +960,15 @@ void Int64Lowering::LowerNode(Node* node) {
input = GetReplacementLow(input);
}
// Sign extend low node to Int32
input = graph()->NewNode(machine()->SignExtendWord16ToInt32(), input);
Node* low_node =
graph()->NewNode(machine()->SignExtendWord16ToInt32(), input);
// We use SAR to preserve the sign in the high word.
ReplaceNode(
node, input,
graph()->NewNode(machine()->Word32Sar(), input,
graph()->NewNode(common()->Int32Constant(31))));
Node* high_node =
graph()->NewNode(machine()->Word32Sar(), low_node,
graph()->NewNode(common()->Int32Constant(31)));
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
node->NullAllInputs();
break;
}
@ -934,6 +982,7 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNodeWithProjections(node);
} else {
NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
SetInt32Type(node);
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
@ -990,6 +1039,7 @@ void Int64Lowering::LowerNode(Node* node) {
DefaultLowering(node, true);
NodeProperties::ChangeOp(node,
machine()->Word32AtomicCompareExchange(type));
SetInt32Type(node);
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
@ -1006,9 +1056,13 @@ void Int64Lowering::LowerNode(Node* node) {
DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
int32_t lane = OpParameter<int32_t>(node->op());
ReplaceNode(
node, graph()->NewNode(machine()->I32x4ExtractLane(lane * 2), input),
graph()->NewNode(machine()->I32x4ExtractLane(lane * 2 + 1), input));
Node* low_node =
graph()->NewNode(machine()->I32x4ExtractLane(lane * 2), input);
Node* high_node =
graph()->NewNode(machine()->I32x4ExtractLane(lane * 2 + 1), input);
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
break;
}
case IrOpcode::kI64x2ReplaceLane: {
@ -1040,7 +1094,7 @@ void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
GetReplacementHigh(right)),
graph()->NewNode(low_word_op, GetReplacementLow(left),
GetReplacementLow(right))));
SetInt32Type(replacement);
ReplaceNode(node, replacement, nullptr);
}
@ -1130,6 +1184,8 @@ void Int64Lowering::ReplaceNodeWithProjections(Node* node) {
graph()->NewNode(common()->Projection(0), node, graph()->start());
Node* high_node =
graph()->NewNode(common()->Projection(1), node, graph()->start());
SetInt32Type(low_node);
SetInt32Type(high_node);
ReplaceNode(node, low_node, high_node);
}

View File

@ -35,7 +35,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Int64Lowering(
Graph* graph, MachineOperatorBuilder* machine,
CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified_,
Zone* zone, Signature<MachineRepresentation>* signature,
Zone* zone, const wasm::WasmModule* module,
Signature<MachineRepresentation>* signature,
std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr);
void LowerGraph();
@ -72,6 +73,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
const CallDescriptor* LowerCallDescriptor(
const CallDescriptor* call_descriptor);
void SetInt32Type(Node* node);
void SetFloat64Type(Node* node);
void ReplaceNode(Node* old, Node* new_low, Node* new_high);
bool HasReplacementLow(Node* node);
@ -88,17 +91,20 @@ class V8_EXPORT_PRIVATE Int64Lowering {
int input_index;
};
Zone* zone_;
Graph* const graph_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
SimplifiedOperatorBuilder* simplified_;
Zone* zone_;
Signature<MachineRepresentation>* signature_;
std::unique_ptr<Int64LoweringSpecialCase> special_case_;
std::vector<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
Signature<MachineRepresentation>* signature_;
Node* placeholder_;
std::unique_ptr<Int64LoweringSpecialCase> special_case_;
// Caches for node types, so we do not waste memory.
Type int32_type_;
Type float64_type_;
};
} // namespace compiler

View File

@ -109,6 +109,7 @@
#include "src/compiler/wasm-gc-lowering.h"
#include "src/compiler/wasm-inlining.h"
#include "src/compiler/wasm-loop-peeling.h"
#include "src/compiler/wasm-typer.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@ -2059,6 +2060,19 @@ struct TurboshaftRecreateSchedulePhase {
};
#if V8_ENABLE_WEBASSEMBLY
// Pipeline phase that runs the WasmTyper reducer over the whole graph:
// it types nodes left untyped by graph building (mainly phis) and narrows
// existing types where possible. Only run for wasm-gc compilations.
struct WasmTypingPhase {
  DECL_PIPELINE_PHASE_CONSTANTS(WasmTyping)
  // {function_index} is forwarded to the typer, which uses it for tracing.
  void Run(PipelineData* data, Zone* temp_zone, uint32_t function_index) {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
    WasmTyper typer(&graph_reducer, data->mcgraph(), function_index);
    AddReducer(data, &graph_reducer, &typer);
    graph_reducer.ReduceGraph();
  }
};
struct WasmGCLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(WasmGCLowering)
@ -3283,6 +3297,8 @@ void Pipeline::GenerateCodeForWasmFunction(
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_experimental_wasm_gc) {
pipeline.Run<WasmTypingPhase>(function_index);
pipeline.RunPrintAndVerify(WasmTypingPhase::phase_name(), true);
pipeline.Run<WasmGCLoweringPhase>();
pipeline.RunPrintAndVerify(WasmGCLoweringPhase::phase_name(), true);
}

View File

@ -3990,7 +3990,8 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
gasm_->simplified(), mcgraph()->zone(), sig,
gasm_->simplified(), mcgraph()->zone(),
env_ != nullptr ? env_->module : nullptr, sig,
std::move(lowering_special_case_));
r.LowerGraph();
}
@ -7284,7 +7285,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
mcgraph()->common(), gasm_->simplified(),
mcgraph()->zone(), &c_entry_sig);
mcgraph()->zone(), module_, &c_entry_sig);
r.LowerGraph();
}
}

View File

@ -139,27 +139,6 @@ void WasmInliner::Finalize() {
&module()->functions[candidate.inlinee_index];
base::Vector<const byte> function_bytes =
wire_bytes_->GetCode(inlinee->code);
// We use the signature based on the real argument types stored in the call
// node. This is more specific than the callee's formal signature and might
// enable some optimizations.
const wasm::FunctionSig* specialized_sig =
CallDescriptorOf(call->op())->wasm_sig();
#if DEBUG
// Check that the real signature is a subtype of the formal one.
const wasm::FunctionSig* formal_sig =
WasmGraphBuilder::Int64LoweredSig(zone(), inlinee->sig);
CHECK_EQ(specialized_sig->parameter_count(), formal_sig->parameter_count());
CHECK_EQ(specialized_sig->return_count(), formal_sig->return_count());
for (size_t i = 0; i < specialized_sig->parameter_count(); i++) {
CHECK(wasm::IsSubtypeOf(specialized_sig->GetParam(i),
formal_sig->GetParam(i), module()));
}
for (size_t i = 0; i < specialized_sig->return_count(); i++) {
CHECK(wasm::IsSubtypeOf(formal_sig->GetReturn(i),
specialized_sig->GetReturn(i), module()));
}
#endif
wasm::WasmFeatures detected;
std::vector<WasmLoopInfo> inlinee_loop_infos;
@ -167,12 +146,12 @@ void WasmInliner::Finalize() {
size_t subgraph_min_node_id = graph()->NodeCount();
Node* inlinee_start;
Node* inlinee_end;
for (const wasm::FunctionSig* sig = specialized_sig;;) {
const wasm::FunctionBody inlinee_body(sig, inlinee->code.offset(),
function_bytes.begin(),
function_bytes.end());
WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
source_positions_);
const wasm::FunctionBody inlinee_body(inlinee->sig, inlinee->code.offset(),
function_bytes.begin(),
function_bytes.end());
WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
source_positions_);
{
Graph::SubgraphScope scope(graph());
wasm::DecodeResult result = wasm::BuildTFGraph(
zone()->allocator(), env_->enabled_features, module(), &builder,
@ -185,19 +164,11 @@ void WasmInliner::Finalize() {
builder.LowerInt64(WasmGraphBuilder::kCalledFromWasm);
inlinee_start = graph()->start();
inlinee_end = graph()->end();
break;
} else {
// Otherwise report failure.
Trace(candidate, "failed to compile");
return;
}
if (sig == specialized_sig) {
// One possible reason for failure is the opportunistic signature
// specialization. Try again without that.
sig = inlinee->sig;
inlinee_loop_infos.clear();
Trace(candidate, "retrying with original signature");
continue;
}
// Otherwise report failure.
Trace(candidate, "failed to compile");
return;
}
size_t additional_nodes = graph()->NodeCount() - subgraph_min_node_id;

267
src/compiler/wasm-typer.cc Normal file
View File

@ -0,0 +1,267 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/wasm-typer.h"
#include "src/base/logging.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/simplified-operator.h"
#include "src/utils/utils.h"
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(...) \
if (FLAG_trace_wasm_typer) PrintF(__VA_ARGS__);
// {function_index} is only used for tracing output; the graph zone is
// cached because newly computed types are allocated in it (see Reduce()).
WasmTyper::WasmTyper(Editor* editor, MachineGraph* mcgraph,
                     uint32_t function_index)
    : AdvancedReducer(editor),
      function_index_(function_index),
      mcgraph_(mcgraph),
      graph_zone_(mcgraph->graph()->zone()) {}
namespace {
// Returns true iff every value input of {node} already carries a type.
// Pure predicate; the order in which inputs are inspected is irrelevant.
bool AllInputsTyped(Node* node) {
  int value_inputs = node->op()->ValueInputCount();
  for (int i = value_inputs; i-- > 0;) {
    Node* input = NodeProperties::GetValueInput(node, i);
    if (!NodeProperties::IsTyped(input)) return false;
  }
  return true;
}
// Traverse the fields of a struct until we find one at offset equal to
// {offset}, and return its type.
// If we are in a 32-bit platform, the code has undergone int64 lowering:
// loads from i64 fields have been transformed into a pair of i32 loads. The
// first load has the offset of the original field, and the second one has
// an offset which is greater by size of i32.
// TODO(manoskouk): Improve this.
wasm::ValueType StructFieldFromOffset(const wasm::StructType* type,
                                      uint32_t offset, bool is_32) {
  for (uint32_t i = 0; i < type->field_count(); i++) {
    wasm::ValueType field = type->field(i);
    uint32_t field_offset = wasm::ObjectAccess::ToTagged(
        WasmStruct::kHeaderSize + type->field_offset(i));
    bool is_lowered_i64 = is_32 && field == wasm::kWasmI64;
    // On 32-bit platforms, a load at {field_offset} + 4 is the high-word
    // half of an int64-lowered i64 field.
    if (is_lowered_i64 &&
        field_offset + wasm::kWasmI32.value_kind_size() == offset) {
      return wasm::kWasmI32;
    }
    if (field_offset == offset) {
      // The low-word half of a lowered i64 field is also typed as i32.
      return is_lowered_i64 ? wasm::kWasmI32 : field.Unpacked();
    }
  }
  return wasm::kWasmBottom;
}
} // namespace
// Computes a (possibly narrower) wasm type for {node}. Handles TypeGuard,
// WasmTypeCast, AssertNotNull, Phi, LoadFromObject and
// LoadImmutableFromObject; all other opcodes are left untouched.
// Returns Changed(node) only if the computed type differs from the current
// one, so the reducer fixpoint terminates once types stabilize.
Reduction WasmTyper::Reduce(Node* node) {
  using TypeInModule = wasm::TypeInModule;
  TypeInModule computed_type;
  switch (node->opcode()) {
    case IrOpcode::kTypeGuard: {
      if (!AllInputsTyped(node)) return NoChange();
      TypeInModule guarded_type = TypeGuardTypeOf(node->op()).AsWasm();
      TypeInModule input_type =
          NodeProperties::GetType(NodeProperties::GetValueInput(node, 0))
              .AsWasm();
      // Note: The intersection type might be bottom. In this case, we are in a
      // dead branch: Type this node as bottom and wait for the
      // WasmGCOperatorReducer to remove it.
      computed_type = wasm::Intersection(guarded_type, input_type);
      break;
    }
    case IrOpcode::kWasmTypeCast: {
      if (!AllInputsTyped(node)) return NoChange();
      // Input 0 is the object being cast, input 1 the RTT of the target type.
      TypeInModule object_type =
          NodeProperties::GetType(NodeProperties::GetValueInput(node, 0))
              .AsWasm();
      TypeInModule rtt_type =
          NodeProperties::GetType(NodeProperties::GetValueInput(node, 1))
              .AsWasm();
      // The cast result is nullable: a null input passes the cast unchanged.
      wasm::ValueType to_type =
          wasm::ValueType::Ref(rtt_type.type.ref_index(), wasm::kNullable);
      computed_type = wasm::Intersection(object_type.type, to_type,
                                         object_type.module, rtt_type.module);
      if (object_type.type.is_nullable() && computed_type.type.is_bottom()) {
        // In this case, the value can only be null; we still cannot type it as
        // bottom.
        // TODO(manoskouk): Improve when we have nullref.
        computed_type.type = to_type;
      }
      break;
    }
    case IrOpcode::kAssertNotNull: {
      if (!AllInputsTyped(node)) return NoChange();
      // Same type as the input, minus nullability.
      TypeInModule object_type =
          NodeProperties::GetType(NodeProperties::GetValueInput(node, 0))
              .AsWasm();
      computed_type = {object_type.type.AsNonNull(), object_type.module};
      break;
    }
    case IrOpcode::kPhi: {
      if (!AllInputsTyped(node)) {
        bool is_loop_phi =
            NodeProperties::GetControlInput(node)->opcode() == IrOpcode::kLoop;
        // For a merge phi, we need all inputs to be typed.
        if (!is_loop_phi) return NoChange();
        // For a loop phi, we can forward the non-recursive-input type. We can
        // recompute the type when the rest of the inputs' types are computed.
        Node* non_recursive_input = NodeProperties::GetValueInput(node, 0);
        if (!NodeProperties::IsTyped(non_recursive_input)) return NoChange();
        computed_type = NodeProperties::GetType(non_recursive_input).AsWasm();
        TRACE("function: %d, loop phi node: %d, type: %s\n", function_index_,
              node->id(), computed_type.type.name().c_str());
        break;
      }
      // All inputs are typed: the phi's type is the union of its inputs.
      computed_type =
          NodeProperties::GetType(NodeProperties::GetValueInput(node, 0))
              .AsWasm();
      for (int i = 1; i < node->op()->ValueInputCount(); i++) {
        Node* input = NodeProperties::GetValueInput(node, i);
        TypeInModule input_type = NodeProperties::GetType(input).AsWasm();
        // We do not want union of types from unreachable branches.
        if (!input_type.type.is_bottom()) {
          computed_type = wasm::Union(computed_type, input_type);
        }
      }
      TRACE(
          "function: %d, phi node: %d, input#: %d, input0:%d:%s, input1:%d:%s, "
          "type: %s\n",
          function_index_, node->id(), node->op()->ValueInputCount(),
          node->InputAt(0)->id(),
          NodeProperties::GetType(node->InputAt(0))
              .AsWasm()
              .type.name()
              .c_str(),
          node->InputAt(1)->id(),
          node->op()->ValueInputCount() > 1
              ? NodeProperties::GetType(node->InputAt(1))
                    .AsWasm()
                    .type.name()
                    .c_str()
              : "<control>",
          computed_type.type.name().c_str());
      break;
    }
    case IrOpcode::kLoadFromObject:
    case IrOpcode::kLoadImmutableFromObject: {
      Node* object = NodeProperties::GetValueInput(node, 0);
      Node* offset = NodeProperties::GetValueInput(node, 1);
      // This can happen either because the object has not been typed yet, or
      // because it is an internal VM object (e.g. the instance).
      if (!NodeProperties::IsTyped(object)) return NoChange();
      TypeInModule object_type = NodeProperties::GetType(object).AsWasm();
      // This can happen in unreachable branches.
      if (object_type.type.is_bottom()) {
        computed_type = {wasm::kWasmBottom, object_type.module};
        break;
      }
      if (object_type.type.is_rtt()) return NoChange();
      DCHECK(object_type.type.is_object_reference());
      IntPtrMatcher m(offset);
      // Do not modify if we are getting the map.
      if (m.Is(wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset))) {
        return NoChange();
      }
      // Do not modify if we are retrieving the array length.
      if (object_type.type.is_reference_to(wasm::HeapType::kArray) &&
          m.Is(wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset))) {
        return NoChange();
      }
      // Look up the loaded field's type in the object's type definition.
      uint32_t ref_index = object_type.type.ref_index();
      DCHECK(object_type.module->has_type(ref_index));
      wasm::TypeDefinition type_def = object_type.module->types[ref_index];
      switch (type_def.kind) {
        case wasm::TypeDefinition::kFunction:
          // This can happen for internal structures only.
          return NoChange();
        case wasm::TypeDefinition::kStruct: {
          wasm::ValueType field_type = StructFieldFromOffset(
              type_def.struct_type, static_cast<uint32_t>(m.ResolvedValue()),
              mcgraph_->machine()->Is32());
          if (field_type.is_bottom()) {
            // No field lives at this offset; typing went wrong somewhere.
            TRACE(
                "Error - Bottom struct field. function: %d, node %d:%s, "
                "input0: %d, type: %s, offset %d\n",
                function_index_, node->id(), node->op()->mnemonic(),
                node->InputAt(0)->id(), object_type.type.name().c_str(),
                static_cast<int>(m.ResolvedValue()));
            UNREACHABLE();
          }
          computed_type = {field_type, object_type.module};
          break;
        }
        case wasm::TypeDefinition::kArray: {
          // Do not modify if we are retrieving the array length.
          if (m.Is(wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset))) {
            return NoChange();
          }
          wasm::ValueType element_type = type_def.array_type->element_type();
          // We have to consider that, after int64 lowering in 32-bit platforms,
          // loads from i64 arrays get transformed into pairs of i32 loads.
          computed_type = {
              mcgraph_->machine()->Is32() && element_type == wasm::kWasmI64
                  ? wasm::kWasmI32
                  : element_type.Unpacked(),
              object_type.module};
          break;
        }
      }
      break;
    }
    default:
      return NoChange();
  }
  if (NodeProperties::IsTyped(node)) {
    // Sanity check: unless one of the two types is bottom, the current and
    // the newly computed type must be related by subtyping in one direction.
    TypeInModule current_type = NodeProperties::GetType(node).AsWasm();
    if (!(current_type.type.is_bottom() || computed_type.type.is_bottom() ||
          wasm::IsSubtypeOf(current_type.type, computed_type.type,
                            current_type.module, computed_type.module) ||
          wasm::IsSubtypeOf(computed_type.type, current_type.type,
                            computed_type.module, current_type.module))) {
      TRACE(
          "Error - Incompatible types. function: %d, node: %d:%s, input0:%d, "
          "current %s, computed %s\n",
          function_index_, node->id(), node->op()->mnemonic(),
          node->InputAt(0)->id(), current_type.type.name().c_str(),
          computed_type.type.name().c_str());
      UNREACHABLE();
    }
    // Nothing to do if the type did not change (up to type equivalence).
    if (wasm::EquivalentTypes(current_type.type, computed_type.type,
                              current_type.module, computed_type.module)) {
      return NoChange();
    }
  }
  TRACE("function: %d, node: %d:%s, from: %s, to: %s\n", function_index_,
        node->id(), node->op()->mnemonic(),
        NodeProperties::IsTyped(node)
            ? NodeProperties::GetType(node).AsWasm().type.name().c_str()
            : "<untyped>",
        computed_type.type.name().c_str());
  NodeProperties::SetType(node, Type::Wasm(computed_type, graph_zone_));
  return Changed(node);
}
#undef TRACE
} // namespace compiler
} // namespace internal
} // namespace v8

39
src/compiler/wasm-typer.h Normal file
View File

@ -0,0 +1,39 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if !V8_ENABLE_WEBASSEMBLY
#error This header should only be included if WebAssembly is enabled.
#endif // !V8_ENABLE_WEBASSEMBLY
#ifndef V8_COMPILER_WASM_TYPER_H_
#define V8_COMPILER_WASM_TYPER_H_
#include "src/compiler/graph-reducer.h"
#include "src/compiler/wasm-graph-assembler.h"
namespace v8 {
namespace internal {
namespace compiler {
class MachineGraph;
// Reducer that types untyped nodes (mainly phis) and narrows existing wasm
// types as much as possible; see wasm-typer.cc for the handled opcodes.
class WasmTyper final : public AdvancedReducer {
 public:
  WasmTyper(Editor* editor, MachineGraph* mcgraph, uint32_t function_index);
  const char* reducer_name() const override { return "WasmTyper"; }
  Reduction Reduce(Node* node) final;
 private:
  // Index of the function being compiled; used for tracing only.
  uint32_t function_index_;
  MachineGraph* mcgraph_;
  // Zone of the graph; new types are allocated here.
  Zone* graph_zone_;
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_WASM_TYPER_H_

View File

@ -1122,6 +1122,7 @@ DEFINE_BOOL(wasm_speculative_inlining, false,
DEFINE_BOOL(trace_wasm_inlining, false, "trace wasm inlining")
DEFINE_BOOL(trace_wasm_speculative_inlining, false,
"trace wasm speculative inlining")
DEFINE_BOOL(trace_wasm_typer, false, "trace wasm typer")
DEFINE_BOOL(wasm_type_canonicalization, false,
"apply isorecursive canonicalization on wasm types")
DEFINE_IMPLICATION(wasm_speculative_inlining, wasm_dynamic_tiering)

View File

@ -384,6 +384,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopPeeling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmTyping) \
\
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \

View File

@ -60,6 +60,9 @@ class StructType : public ZoneObject {
}
bool operator!=(const StructType& other) const { return !(*this == other); }
// Returns the offset of this field in the runtime representation of the
// object, from the start of the object fields (disregarding the object
// header).
uint32_t field_offset(uint32_t index) const {
DCHECK_LT(index, field_count());
if (index == 0) return 0;
@ -124,6 +127,15 @@ class StructType : public ZoneObject {
const bool* const mutabilities_;
};
inline std::ostream& operator<<(std::ostream& out, StructType type) {
out << "[";
for (ValueType field : type.fields()) {
out << field.name() << ", ";
}
out << "]";
return out;
}
class ArrayType : public ZoneObject {
public:
constexpr explicit ArrayType(ValueType rep, bool mutability)

View File

@ -112,6 +112,8 @@ struct TypeInModule {
TypeInModule(ValueType type, const WasmModule* module)
: type(type), module(module) {}
TypeInModule() : TypeInModule(kWasmBottom, nullptr) {}
// Two TypeInModule values are equal iff both the value type and the module
// pointer match. Both comparisons are side-effect free, so evaluating them
// eagerly is equivalent to the short-circuit form.
bool operator==(const TypeInModule& other) const {
  const bool same_type = (type == other.type);
  const bool same_module = (module == other.module);
  return same_type && same_module;
}

View File

@ -517,7 +517,7 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode(Isolate* isolate) {
rep_builder.AddParam(MachineRepresentation::kWord32);
}
compiler::Int64Lowering r(graph(), machine(), common(), simplified(),
zone(), rep_builder.Build());
zone(), nullptr, rep_builder.Build());
r.LowerGraph();
}

View File

@ -0,0 +1,68 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-wasm-gc --no-liftoff
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// Test that we can eliminate type checks based on narrowed argument types
// (by inspecting the resulting graph).
// Builds a module with a struct hierarchy top <: middle <: {bottom1, bottom2}
// and a loop whose "temp" local is only ever assigned bottom1 or bottom2, so
// the typing phase should be able to prove the `middle` cast below redundant.
(function WasmTypedOptimizationsTest() {
let builder = new WasmModuleBuilder();
// top: {i32}
let top = builder.addStruct([makeField(kWasmI32, true)]);
// middle <: top, with an extra i64 field.
let middle = builder.addStruct([makeField(kWasmI32, true),
makeField(kWasmI64, false)],
top);
// bottom1 <: middle, with an extra i32 field.
let bottom1 = builder.addStruct([makeField(kWasmI32, true),
makeField(kWasmI64, false),
makeField(kWasmI32, true)],
middle);
// bottom2 <: middle, with an extra i64 field.
let bottom2 = builder.addStruct([makeField(kWasmI32, true),
makeField(kWasmI64, false),
makeField(kWasmI64, false)],
middle);
// main(x0: ref bottom1, x1: ref bottom2) -> i32.
// Local 2 ("temp"): nullable ref to top; local 3 ("counter"): i32.
builder.addFunction("main", makeSig(
[wasmRefType(bottom1), wasmRefType(bottom2)], [kWasmI32]))
.addLocals(wasmOptRefType(top), 1)
.addLocals(kWasmI32, 1)
.addBody([
// temp = x0;
kExprLocalGet, 0, kExprLocalSet, 2,
// while (true) {
kExprLoop, kWasmVoid,
// if (ref.test temp bottom1) {
kExprLocalGet, 2, kGCPrefix, kExprRefTestStatic, bottom1,
kExprIf, kWasmVoid,
// counter += ((bottom1) temp).field_2;
// TODO(manoskouk): Implement path-based type tracking so we can
// eliminate this check.
kExprLocalGet, 2, kGCPrefix, kExprRefCastStatic, bottom1,
kGCPrefix, kExprStructGet, bottom1, 2,
kExprLocalGet, 3, kExprI32Add, kExprLocalSet, 3,
// temp = x1;
kExprLocalGet, 1, kExprLocalSet, 2,
// } else {
kExprElse,
// counter += (i32) ((middle) temp).field_1
// Note: This cast should get optimized away, as temp only gets
// assigned to {bottom1} and {bottom2}.
kExprLocalGet, 2, kGCPrefix, kExprRefCastStatic, middle,
kGCPrefix, kExprStructGet, middle, 1, kExprI32ConvertI64,
kExprLocalGet, 3, kExprI32Add, kExprLocalSet, 3,
// temp = x0;
kExprLocalGet, 0, kExprLocalSet, 2,
// }
kExprEnd,
// if (counter < 100) continue; break;
kExprLocalGet, 3, kExprI32Const, 100, kExprI32LtS,
kExprBrIf, 0,
// }
kExprEnd,
// return counter;
kExprLocalGet, 3])
.exportFunc();
// Instantiation triggers Turbofan compilation (--no-liftoff). The actual
// graph inspection is done externally — NOTE(review): presumably via tracing
// flags; this script itself only checks that compilation succeeds.
builder.instantiate({});
})();

View File

@ -50,7 +50,7 @@ class Int64LoweringTest : public GraphTest {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
signature);
nullptr, signature);
lowering.LowerGraph();
}
@ -69,7 +69,8 @@ class Int64LoweringTest : public GraphTest {
sig_builder.AddReturn(rep);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build(), std::move(special_case));
nullptr, sig_builder.Build(),
std::move(special_case));
lowering.LowerGraph();
}
@ -153,8 +154,7 @@ TEST_F(Int64LoweringTest, Int64Constant) {
#define LOAD_VERIFY(kLoad) \
Matcher<Node*> high_word_load_matcher = \
Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), \
start(), start()); \
IsInt32Constant(index + 4), start(), start()); \
\
EXPECT_THAT( \
graph()->end()->InputAt(1), \
@ -218,9 +218,8 @@ TEST_F(Int64LoweringTest, Int64LoadImmutable) {
Capture<Node*> high_word_load;
#if defined(V8_TARGET_LITTLE_ENDIAN)
Matcher<Node*> high_word_load_matcher =
IsLoadImmutable(MachineType::Int32(), IsInt32Constant(base),
IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)));
Matcher<Node*> high_word_load_matcher = IsLoadImmutable(
MachineType::Int32(), IsInt32Constant(base), IsInt32Constant(index + 4));
EXPECT_THAT(
graph()->end()->InputAt(1),
@ -234,9 +233,8 @@ TEST_F(Int64LoweringTest, Int64LoadImmutable) {
EXPECT_THAT(
graph()->end()->InputAt(1),
IsReturn2(IsLoadImmutable(
MachineType::Int32(), IsInt32Constant(base),
IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4))),
IsReturn2(IsLoadImmutable(MachineType::Int32(), IsInt32Constant(base),
IsInt32Constant(index + 4)),
AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
start(), start()));
#endif
@ -247,14 +245,13 @@ TEST_F(Int64LoweringTest, Int64LoadImmutable) {
EXPECT_THAT( \
graph()->end()->InputAt(1), \
IsReturn(IsInt32Constant(return_value), \
Is##kStore( \
kRep, IsInt32Constant(base), IsInt32Constant(index), \
IsInt32Constant(low_word_value(0)), \
Is##kStore( \
kRep, IsInt32Constant(base), \
IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)), \
IsInt32Constant(high_word_value(0)), start(), start()), \
start()), \
Is##kStore(kRep, IsInt32Constant(base), IsInt32Constant(index), \
IsInt32Constant(low_word_value(0)), \
Is##kStore(kRep, IsInt32Constant(base), \
IsInt32Constant(index + 4), \
IsInt32Constant(high_word_value(0)), \
start(), start()), \
start()), \
start()));
#elif defined(V8_TARGET_BIG_ENDIAN)
#define STORE_VERIFY(kStore, kRep) \
@ -291,7 +288,7 @@ TEST_F(Int64LoweringTest, Int64LoadImmutable) {
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
sig_builder.Build()); \
nullptr, sig_builder.Build()); \
lowering.LowerGraph(); \
\
STORE_VERIFY(kStore, kRep32)
@ -325,7 +322,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build());
nullptr, sig_builder.Build());
lowering.LowerGraph();
EXPECT_THAT(