Reland "[wasm-gc] Liftoff support part 4: subtyping"

This is a reland of dc369749c7
Changes: relaxed --liftoff-only mode to still allow bailing
out due to missing CPU support.
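For concreteness, a minimal sketch of the relaxed rule, using a stand-in enum rather than V8's real LiftoffBailoutReason (the actual check is in the LiftoffCompiler hunk below):

// Under --liftoff-only, any bailout is fatal except one caused by missing
// CPU support (e.g. no Wasm SIMD available on the target or simulator).
enum class BailoutReason { kMissingCPUFeature, kRefTypes, kGC, kOtherReason };

bool IsFatalUnderLiftoffOnly(bool liftoff_only, BailoutReason reason) {
  return liftoff_only && reason != BailoutReason::kMissingCPUFeature;
}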

Original change's description:
> [wasm-gc] Liftoff support part 4: subtyping
>
> This adds support for the following instructions:
> struct.new_default, rtt.sub, ref.test, ref.cast
>
> Bug: v8:7748
> Change-Id: I7423ddd7a83c80cb1e82c620780c27bec59ec762
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2593341
> Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71805}

Bug: v8:7748
Change-Id: If31fcee5e7e173d7c2a6e1c624f4ff04cec7fe9c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2596338
Auto-Submit: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71823}
Jakob Kummerow authored 2020-12-17 01:37:09 +01:00, committed by Commit Bot
parent 63b78f2b01
commit f475e99021
6 changed files with 138 additions and 87 deletions


@@ -231,9 +231,9 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
}
builtin WasmAllocateRtt(implicit context: Context)(
typeIndex: Smi, parent: Map): Map {
tail runtime::WasmAllocateRtt(context, typeIndex, parent);
builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
tail runtime::WasmAllocateRtt(
LoadContextFromFrame(), SmiTag(typeIndex), parent);
}
builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {


@@ -5701,12 +5701,10 @@ Node* WasmGraphBuilder::RttCanon(wasm::HeapType type) {
}
Node* WasmGraphBuilder::RttSub(wasm::HeapType type, Node* parent_rtt) {
return CALL_BUILTIN(
WasmAllocateRtt,
graph()->NewNode(
mcgraph()->common()->NumberConstant(type.representation())),
parent_rtt,
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
return CALL_BUILTIN(WasmAllocateRtt,
graph()->NewNode(mcgraph()->common()->Int32Constant(
type.representation())),
parent_rtt);
}
void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {


@@ -403,7 +403,10 @@ class LiftoffCompiler {
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
detail);
UnuseLabels(decoder);
if (FLAG_liftoff_only) {
// --liftoff-only ensures that tests actually exercise the Liftoff path
// without bailing out. Bailing out due to (simulated) lack of CPU support
// is okay though.
if (FLAG_liftoff_only && reason != kMissingCPUFeature) {
FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s",
detail);
}
@@ -415,26 +418,9 @@ class LiftoffCompiler {
return true;
}
LiftoffBailoutReason BailoutReasonForType(ValueType type) {
switch (type.kind()) {
case ValueType::kS128:
return kSimd;
case ValueType::kOptRef:
case ValueType::kRef:
if (type.is_reference_to(HeapType::kExn)) {
return kExceptionHandling;
} else {
return kRefTypes;
}
case ValueType::kBottom:
return kMultiValue;
default:
return kOtherReason;
}
}
bool CheckSupportedType(FullDecoder* decoder, ValueType type,
const char* context) {
LiftoffBailoutReason bailout_reason = kOtherReason;
switch (type.kind()) {
case ValueType::kI32:
case ValueType::kI64:
@@ -443,6 +429,7 @@ class LiftoffCompiler {
return true;
case ValueType::kS128:
if (CpuFeatures::SupportsWasmSimd128()) return true;
bailout_reason = kMissingCPUFeature;
break;
case ValueType::kRef:
case ValueType::kOptRef:
@@ -450,12 +437,16 @@ class LiftoffCompiler {
case ValueType::kI8:
case ValueType::kI16:
if (FLAG_experimental_liftoff_extern_ref) return true;
if (type.is_reference_to(HeapType::kExn)) {
bailout_reason = kExceptionHandling;
} else {
bailout_reason = kRefTypes;
}
break;
case ValueType::kBottom:
case ValueType::kStmt:
UNREACHABLE();
}
LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", type.name().c_str(), context);
unsupported(decoder, bailout_reason, buffer.begin());
@@ -3860,9 +3851,9 @@ class LiftoffCompiler {
unsupported(decoder, kRefTypes, "table.fill");
}
void StructNewWithRtt(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
const Value& rtt, const Value args[], Value* result) {
void StructNew(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const Value& rtt,
bool initial_values_on_stack) {
ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
compiler::CallDescriptor* call_descriptor =
@@ -3883,19 +3874,31 @@ class LiftoffCompiler {
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
ValueType field_type = imm.struct_type->field(i);
LiftoffRegister value = initial_values_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
reg_class_for(field_type), pinned));
if (!initial_values_on_stack) {
if (!CheckSupportedType(decoder, field_type, "default value")) return;
SetDefaultValue(value, field_type, pinned);
}
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
pinned.clear(value);
}
__ PushRegister(struct_value_type, obj);
}
void StructNewWithRtt(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
const Value& rtt, const Value args[], Value* result) {
StructNew(decoder, imm, rtt, true);
}
void StructNewDefault(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm,
const Value& rtt, Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "struct.new_default_with_rtt");
StructNew(decoder, imm, rtt, false);
}
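
The helper above is shared by both opcodes: struct.new_with_rtt pops the field values off the operand stack, while struct.new_default asks SetDefaultValue for each field. A minimal standalone sketch of that default-value rule (plain C++; the kind enum is a simplified stand-in for V8's ValueType, which covers more kinds such as packed and SIMD types):

#include <cstdint>
#include <variant>

// Simplified stand-in for the value kinds a struct field can have.
enum class FieldKind { kI32, kI64, kF32, kF64, kRef };

using FieldValue = std::variant<int32_t, int64_t, float, double, const void*>;

// struct.new_default initializes every field with its type's default value:
// zero for numeric fields, null for reference fields.
FieldValue DefaultFieldValue(FieldKind kind) {
  switch (kind) {
    case FieldKind::kI32: return int32_t{0};
    case FieldKind::kI64: return int64_t{0};
    case FieldKind::kF32: return 0.0f;
    case FieldKind::kF64: return 0.0;
    case FieldKind::kRef: return static_cast<const void*>(nullptr);
  }
  return int32_t{0};  // unreachable; silences "missing return" warnings
}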
void StructGet(FullDecoder* decoder, const Value& struct_obj,
@@ -4126,33 +4129,37 @@ class LiftoffCompiler {
IsolateData::root_slot_offset(index), {});
__ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
}
void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
const Value& parent, Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "rtt.sub");
ValueType parent_value_type = parent.type;
ValueType rtt_value_type =
ValueType::Rtt(imm.type, parent_value_type.depth() + 1);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
FunctionSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState parent_var =
__ cache_state()->stack_state.end()[-1];
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(type_reg, WasmValue(imm.type.representation()));
LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
__ CallRuntimeStub(target);
DefineSafepoint();
// Drop the parent RTT.
__ cache_state()->stack_state.pop_back(1);
__ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
}
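
For orientation: the static result type is Rtt(imm.type, parent depth + 1) because each rtt.sub extends the parent's supertype chain by one entry. A standalone sketch of that relationship (plain C++; the Rtt struct is an illustrative model, not V8's map/type-info layout, but it is consistent with the list that SubtypeCheck below indexes by depth):

#include <cstdint>
#include <vector>

// Simplified RTT: a type index plus its strict supertypes, ordered by depth.
// The list's size equals the RTT's depth.
struct Rtt {
  uint32_t type_index;
  std::vector<const Rtt*> supertypes;
};

// rtt.canon: a canonical RTT has depth 0, i.e. an empty supertype list.
Rtt RttCanonModel(uint32_t type_index) { return Rtt{type_index, {}}; }

// rtt.sub: the child inherits the parent's supertypes and appends the parent,
// so its depth is the parent's depth + 1.
Rtt RttSubModel(uint32_t type_index, const Rtt& parent) {
  std::vector<const Rtt*> supers = parent.supertypes;
  supers.push_back(&parent);
  return Rtt{type_index, std::move(supers)};
}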
void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "ref.test");
}
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "ref.cast");
}
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result_on_branch, uint32_t depth) {
// Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch.
if (depth != decoder->control_depth() - 1) {
__ MaterializeMergedConstants(
decoder->control_at(depth)->br_merge()->arity);
}
Label branch, cont_false;
LiftoffRegList pinned;
// Falls through on match (=successful type check).
// Returns the register containing the object.
LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
const Value& rtt, Label* no_match,
LiftoffRegList pinned = {},
Register opt_scratch = no_reg) {
Label match;
LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
@@ -4160,35 +4167,36 @@ class LiftoffCompiler {
bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
if (i31_check_only) {
__ emit_smi_check(obj_reg.gp(), &cont_false,
__ emit_smi_check(obj_reg.gp(), no_match,
LiftoffAssembler::kJumpOnNotSmi);
// Emit no further code, just fall through to taking the branch.
// Emit no further code, just fall through to {match}.
} else {
// Reserve all temporary registers up front, so that the cache state
// tracking doesn't get confused by the following conditional jumps.
LiftoffRegister tmp1 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister tmp1 =
opt_scratch != no_reg
? LiftoffRegister(opt_scratch)
: pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
if (obj_can_be_i31) {
DCHECK(!rtt_is_i31);
__ emit_smi_check(obj_reg.gp(), &cont_false,
LiftoffAssembler::kJumpOnSmi);
__ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
}
if (obj.type.is_nullable()) {
LoadNullValue(tmp1.gp(), pinned);
__ emit_cond_jump(kEqual, &cont_false, obj.type, obj_reg.gp(),
tmp1.gp());
__ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
}
// At this point, the object is neither null nor an i31ref. Perform
// a regular type check. Check for exact match first.
__ LoadMap(tmp1.gp(), obj_reg.gp());
// {tmp1} now holds the object's map.
__ emit_cond_jump(kEqual, &branch, rtt.type, tmp1.gp(), rtt_reg.gp());
__ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
// If the object isn't guaranteed to be an array or struct, check that.
// Subsequent code wouldn't handle e.g. funcrefs.
if (!is_data_ref_type(obj.type, decoder->module_)) {
EmitDataRefCheck(tmp1.gp(), &cont_false, tmp2, pinned);
EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
}
// Constant-time subtyping check: load exactly one candidate RTT from the
@@ -4206,19 +4214,59 @@ class LiftoffCompiler {
// Step 3: check the list's length.
LiftoffRegister list_length = tmp2;
__ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
__ emit_i32_cond_jumpi(kUnsignedLessEqual, &cont_false, list_length.gp(),
__ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
rtt.type.depth());
// Step 4: load the candidate list slot into {tmp1}, and compare it.
__ LoadTaggedPointer(
tmp1.gp(), tmp1.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
pinned);
__ emit_cond_jump(kUnequal, &cont_false, rtt.type, tmp1.gp(),
rtt_reg.gp());
// Fall through to taking the branch.
__ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
// Fall through to {match}.
}
__ bind(&match);
return obj_reg;
}
void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result_val) {
Label return_false, done;
LiftoffRegList pinned;
LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
SubtypeCheck(decoder, obj, rtt, &return_false, pinned, result.gp());
__ LoadConstant(result, WasmValue(1));
// TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
__ emit_jump(&done);
__ bind(&return_false);
__ LoadConstant(result, WasmValue(0));
__ bind(&done);
__ PushRegister(kWasmI32, result);
}
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
Label* trap_label = AddOutOfLineTrap(decoder->position(),
WasmCode::kThrowWasmTrapIllegalCast);
LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, trap_label);
__ PushRegister(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
obj_reg);
}
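
Taken together, SubtypeCheck, RefTest and RefCast implement a constant-time type check: at most one supertype slot is loaded, no matter how deep the type hierarchy is. A standalone sketch of the whole sequence (plain C++ over the same simplified Rtt model as in the rtt.sub sketch above; is_i31/is_null/is_data are stand-ins for the Smi, null and data-ref checks in the generated code):

#include <cstdint>
#include <stdexcept>
#include <vector>

struct Rtt {  // same illustrative layout as in the rtt.sub sketch above
  uint32_t type_index;
  std::vector<const Rtt*> supertypes;  // ordered by depth; size() == depth
};

struct ObjModel {
  bool is_i31 = false;       // Smi-encoded i31ref
  bool is_null = false;
  bool is_data = true;       // struct or array; e.g. funcrefs are not
  const Rtt* map = nullptr;  // the object's canonical RTT
};

// Mirrors SubtypeCheck: returns true on a successful check.
bool SubtypeCheckModel(const ObjModel& obj, const Rtt& rtt, uint32_t rtt_depth,
                       bool rtt_is_i31) {
  if (rtt_is_i31) return obj.is_i31;            // i31-only check
  if (obj.is_i31 || obj.is_null) return false;  // Smi and null checks
  if (obj.map == &rtt) return true;             // exact map match
  if (!obj.is_data) return false;               // data-ref check
  const std::vector<const Rtt*>& list = obj.map->supertypes;
  if (list.size() <= rtt_depth) return false;   // step 3: length check
  return list[rtt_depth] == &rtt;               // step 4: one slot comparison
}

// ref.test pushes an i32: 1 on a successful check, 0 otherwise.
int32_t RefTestModel(const ObjModel& obj, const Rtt& rtt, uint32_t depth) {
  return SubtypeCheckModel(obj, rtt, depth, /*rtt_is_i31=*/false) ? 1 : 0;
}

// ref.cast returns the object (now non-nullable) on success and traps
// otherwise; the exception stands in for the kThrowWasmTrapIllegalCast trap.
const ObjModel& RefCastModel(const ObjModel& obj, const Rtt& rtt,
                             uint32_t depth) {
  if (!SubtypeCheckModel(obj, rtt, depth, /*rtt_is_i31=*/false)) {
    throw std::runtime_error("illegal cast");
  }
  return obj;
}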
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result_on_branch, uint32_t depth) {
// Before branching, materialize all constants. This avoids repeatedly
// materializing them for each conditional branch.
if (depth != decoder->control_depth() - 1) {
__ MaterializeMergedConstants(
decoder->control_at(depth)->br_merge()->arity);
}
__ bind(&branch);
Label cont_false;
LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, &cont_false);
__ PushRegister(rtt.type.is_bottom()
? kWasmBottom
: ValueType::Ref(rtt.type.heap_type(), kNonNullable),


@@ -196,9 +196,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
func_body, func_index_,
for_debugging_, counters, detected);
if (result.succeeded()) break;
// In --liftoff-only mode, we should have aborted the process
// on bailout, i.e. before getting here.
DCHECK(!FLAG_liftoff_only);
}
// If Liftoff failed, fall back to turbofan.


@@ -86,6 +86,7 @@ struct WasmModule;
V(RecordWrite) \
V(ToNumber) \
V(WasmAllocateArrayWithRtt) \
V(WasmAllocateRtt) \
V(WasmAllocateStructWithRtt)
// Sorted, disjoint and non-overlapping memory regions. A region is of the


@@ -745,8 +745,9 @@ WASM_COMPILED_EXEC_TEST(WasmPackedArrayS) {
tester.CheckResult(kF, static_cast<int16_t>(expected_outputs[3]), 3);
}
TEST(NewDefault) {
WasmGCTester tester;
WASM_COMPILED_EXEC_TEST(NewDefault) {
WasmGCTester tester(execution_tier);
FLAG_experimental_liftoff_extern_ref = true;
const byte struct_type = tester.DefineStruct(
{F(wasm::kWasmI32, true), F(wasm::kWasmF64, true), F(optref(0), true)});
const byte array_type = tester.DefineArray(wasm::kWasmI32, true);
@@ -873,8 +874,9 @@ TEST(BasicRTT) {
tester.CheckResult(kRefCast, 43);
}
TEST(AnyRefRtt) {
WasmGCTester tester;
WASM_COMPILED_EXEC_TEST(AnyRefRtt) {
WasmGCTester tester(execution_tier);
FLAG_experimental_liftoff_extern_ref = true;
ValueType any_rtt_0_type = ValueType::Rtt(HeapType::kAny, 0);
FunctionSig sig_any_canon(1, 0, &any_rtt_0_type);
@@ -947,8 +949,10 @@ TEST(AnyRefRtt) {
tester.CheckResult(kCheckAnyAgainstAny, 1);
}
TEST(ArrayNewMap) {
WasmGCTester tester;
WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
WasmGCTester tester(execution_tier);
FLAG_experimental_liftoff_extern_ref = true;
const byte type_index = tester.DefineArray(kWasmI32, true);
ValueType array_type = ValueType::Ref(type_index, kNonNullable);
@@ -1065,8 +1069,9 @@ TEST(CallRef) {
tester.CheckResult(caller, 47, 5);
}
TEST(RefTestCastNull) {
WasmGCTester tester;
WASM_COMPILED_EXEC_TEST(RefTestCastNull) {
WasmGCTester tester(execution_tier);
FLAG_experimental_liftoff_extern_ref = true;
byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
const byte kRefTestNull = tester.DefineFunction(
@@ -1266,8 +1271,9 @@ TEST(CastsBenchmark) {
tester.CheckResult(Main, (kListLength * (kListLength - 1) / 2) * kLoops);
}
TEST(GlobalInitReferencingGlobal) {
WasmGCTester tester;
WASM_COMPILED_EXEC_TEST(GlobalInitReferencingGlobal) {
WasmGCTester tester(execution_tier);
FLAG_experimental_liftoff_extern_ref = true;
const byte from = tester.AddGlobal(kWasmI32, false, WasmInitExpr(42));
const byte to =
tester.AddGlobal(kWasmI32, false, WasmInitExpr::GlobalGet(from));
@@ -1280,8 +1286,9 @@ TEST(GlobalInitReferencingGlobal) {
tester.CheckResult(func, 42);
}
TEST(IndirectNullSetManually) {
WasmGCTester tester;
WASM_COMPILED_EXEC_TEST(IndirectNullSetManually) {
WasmGCTester tester(execution_tier);
FLAG_experimental_liftoff_extern_ref = true;
byte sig_index = tester.DefineSignature(tester.sigs.i_i());
tester.DefineTable(ValueType::Ref(sig_index, kNullable), 1, 1);
byte func_index = tester.DefineFunction(