Reland "[wasm-gc] Liftoff support part 4: subtyping"

This is a reland of dc369749c7
Changes: relaxed --liftoff-only mode to still allow bailing
out due to missing CPU support.

Original change's description:
> [wasm-gc] Liftoff support part 4: subtyping
>
> This adds support for the following instructions:
> struct.new_default, rtt.sub, ref.test, ref.cast
>
> Bug: v8:7748
> Change-Id: I7423ddd7a83c80cb1e82c620780c27bec59ec762
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2593341
> Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71805}

Bug: v8:7748
Change-Id: If31fcee5e7e173d7c2a6e1c624f4ff04cec7fe9c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2596338
Auto-Submit: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71823}

parent 63b78f2b01
commit f475e99021
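
Background for the hunks below: ref.test and ref.cast compile down to a
constant-time subtype check. Every RTT produced by rtt.sub carries the full
list of its supertypes, so testing an object against a candidate RTT of
statically known depth is one length comparison plus one load-and-compare,
independent of the height of the type hierarchy. A minimal standalone C++
model of that scheme (illustrative only; the names and types here are made
up, this is not V8 code):

    #include <cstdint>
    #include <vector>

    // Toy RTT: a type index plus the list of all supertypes, ordered by depth.
    struct Rtt {
      uint32_t type_index;
      std::vector<const Rtt*> supertypes;
    };

    // rtt.sub: the child's supertype list is the parent's list plus the
    // parent itself, so its length equals the child's subtyping depth.
    Rtt MakeSubRtt(uint32_t type_index, const Rtt& parent) {
      Rtt child{type_index, parent.supertypes};
      child.supertypes.push_back(&parent);
      return child;
    }

    // ref.test against a candidate of static depth `depth`: exact match
    // first, then one bounds check plus one load-and-compare.
    bool RefTest(const Rtt* obj_rtt, const Rtt* candidate, uint32_t depth) {
      if (obj_rtt == candidate) return true;
      if (obj_rtt->supertypes.size() <= depth) return false;
      return obj_rtt->supertypes[depth] == candidate;
    }
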
@@ -231,9 +231,9 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
   return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
 }
 
-builtin WasmAllocateRtt(implicit context: Context)(
-    typeIndex: Smi, parent: Map): Map {
-  tail runtime::WasmAllocateRtt(context, typeIndex, parent);
+builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
+  tail runtime::WasmAllocateRtt(
+      LoadContextFromFrame(), SmiTag(typeIndex), parent);
 }
 
 builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
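
The Torque change above makes WasmAllocateRtt callable from Liftoff-generated
code: instead of taking an implicit JS context and a Smi, the builtin now
fetches the context from its own frame and Smi-tags the raw type index
itself, so callers pass a plain untagged word (the TurboFan lowering in the
next hunk drops its NativeContext load accordingly). For reference, a
simplified model of the 64-bit Smi encoding that SmiTag relies on (a sketch
assuming V8's upper-half Smi scheme, not the real implementation):

    #include <cstdint>

    // On 64-bit targets a Smi keeps its 32-bit payload in the upper half of
    // the word; the low tag bit stays 0, which distinguishes it from a
    // tagged heap pointer.
    constexpr int64_t SmiTag(int32_t value) {
      // Cast through unsigned so the shift is well-defined for negatives.
      return static_cast<int64_t>(
          static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32);
    }
    constexpr int32_t SmiUntag(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

    static_assert(SmiUntag(SmiTag(231)) == 231, "tagging round-trips");
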
@@ -5701,12 +5701,10 @@ Node* WasmGraphBuilder::RttCanon(wasm::HeapType type) {
 }
 
 Node* WasmGraphBuilder::RttSub(wasm::HeapType type, Node* parent_rtt) {
-  return CALL_BUILTIN(
-      WasmAllocateRtt,
-      graph()->NewNode(
-          mcgraph()->common()->NumberConstant(type.representation())),
-      parent_rtt,
-      LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+  return CALL_BUILTIN(WasmAllocateRtt,
+                      graph()->NewNode(mcgraph()->common()->Int32Constant(
+                          type.representation())),
+                      parent_rtt);
 }
 
 void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
@@ -403,7 +403,10 @@ class LiftoffCompiler {
     decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
                     detail);
     UnuseLabels(decoder);
-    if (FLAG_liftoff_only) {
+    // --liftoff-only ensures that tests actually exercise the Liftoff path
+    // without bailing out. Bailing out due to (simulated) lack of CPU support
+    // is okay though.
+    if (FLAG_liftoff_only && reason != kMissingCPUFeature) {
       FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s",
             detail);
     }
@@ -415,26 +418,9 @@ class LiftoffCompiler {
     return true;
   }
 
-  LiftoffBailoutReason BailoutReasonForType(ValueType type) {
-    switch (type.kind()) {
-      case ValueType::kS128:
-        return kSimd;
-      case ValueType::kOptRef:
-      case ValueType::kRef:
-        if (type.is_reference_to(HeapType::kExn)) {
-          return kExceptionHandling;
-        } else {
-          return kRefTypes;
-        }
-      case ValueType::kBottom:
-        return kMultiValue;
-      default:
-        return kOtherReason;
-    }
-  }
-
   bool CheckSupportedType(FullDecoder* decoder, ValueType type,
                           const char* context) {
+    LiftoffBailoutReason bailout_reason = kOtherReason;
     switch (type.kind()) {
       case ValueType::kI32:
       case ValueType::kI64:
@@ -443,6 +429,7 @@ class LiftoffCompiler {
         return true;
       case ValueType::kS128:
         if (CpuFeatures::SupportsWasmSimd128()) return true;
+        bailout_reason = kMissingCPUFeature;
         break;
       case ValueType::kRef:
       case ValueType::kOptRef:
@@ -450,12 +437,16 @@ class LiftoffCompiler {
       case ValueType::kI8:
       case ValueType::kI16:
         if (FLAG_experimental_liftoff_extern_ref) return true;
+        if (type.is_reference_to(HeapType::kExn)) {
+          bailout_reason = kExceptionHandling;
+        } else {
+          bailout_reason = kRefTypes;
+        }
         break;
       case ValueType::kBottom:
       case ValueType::kStmt:
         UNREACHABLE();
     }
-    LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
     EmbeddedVector<char, 128> buffer;
     SNPrintF(buffer, "%s %s", type.name().c_str(), context);
     unsupported(decoder, bailout_reason, buffer.begin());
@@ -3860,9 +3851,9 @@ class LiftoffCompiler {
     unsupported(decoder, kRefTypes, "table.fill");
   }
 
-  void StructNewWithRtt(FullDecoder* decoder,
-                        const StructIndexImmediate<validate>& imm,
-                        const Value& rtt, const Value args[], Value* result) {
+  void StructNew(FullDecoder* decoder,
+                 const StructIndexImmediate<validate>& imm, const Value& rtt,
+                 bool initial_values_on_stack) {
     ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
     WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
     compiler::CallDescriptor* call_descriptor =
@@ -3883,19 +3874,31 @@ class LiftoffCompiler {
     for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
       i--;
       int offset = StructFieldOffset(imm.struct_type, i);
-      LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
       ValueType field_type = imm.struct_type->field(i);
+      LiftoffRegister value = initial_values_on_stack
+                                  ? pinned.set(__ PopToRegister(pinned))
+                                  : pinned.set(__ GetUnusedRegister(
+                                        reg_class_for(field_type), pinned));
+      if (!initial_values_on_stack) {
+        if (!CheckSupportedType(decoder, field_type, "default value")) return;
+        SetDefaultValue(value, field_type, pinned);
+      }
       StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
       pinned.clear(value);
     }
     __ PushRegister(struct_value_type, obj);
   }
 
+  void StructNewWithRtt(FullDecoder* decoder,
+                        const StructIndexImmediate<validate>& imm,
+                        const Value& rtt, const Value args[], Value* result) {
+    StructNew(decoder, imm, rtt, true);
+  }
+
   void StructNewDefault(FullDecoder* decoder,
                         const StructIndexImmediate<validate>& imm,
                         const Value& rtt, Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "struct.new_default_with_rtt");
+    StructNew(decoder, imm, rtt, false);
   }
 
   void StructGet(FullDecoder* decoder, const Value& struct_obj,
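
The refactoring above is what makes struct.new_default cheap: allocation and
the field-store loop are shared with struct.new_with_rtt, and only the source
of the field values differs: popped off the Liftoff value stack, or
synthesized defaults (zero for numeric fields, null for reference fields).
A standalone sketch of that split, with made-up types (the real code pops
registers or calls SetDefaultValue):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class FieldKind { kI32, kI64, kF32, kF64, kOptRef };

    // Stand-in for StructNew(..., initial_values_on_stack): either consume
    // caller-provided values (back to front, matching stack order) or
    // synthesize defaults.
    std::vector<uint64_t> InitFields(const std::vector<FieldKind>& fields,
                                     std::vector<uint64_t>* stack,
                                     bool initial_values_on_stack,
                                     uint64_t null_sentinel) {
      std::vector<uint64_t> result(fields.size());
      for (std::size_t i = fields.size(); i > 0;) {
        --i;
        if (initial_values_on_stack) {
          result[i] = stack->back();
          stack->pop_back();
        } else {
          result[i] = fields[i] == FieldKind::kOptRef ? null_sentinel : 0;
        }
      }
      return result;
    }
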
@@ -4126,33 +4129,37 @@ class LiftoffCompiler {
                          IsolateData::root_slot_offset(index), {});
     __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
   }
 
   void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
               const Value& parent, Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "rtt.sub");
+    ValueType parent_value_type = parent.type;
+    ValueType rtt_value_type =
+        ValueType::Rtt(imm.type, parent_value_type.depth() + 1);
+    WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
+    compiler::CallDescriptor* call_descriptor =
+        GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
+    ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
+    FunctionSig sig(1, 2, sig_reps);
+    LiftoffAssembler::VarState parent_var =
+        __ cache_state()->stack_state.end()[-1];
+    LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
+    __ LoadConstant(type_reg, WasmValue(imm.type.representation()));
+    LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
+    __ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
+    __ CallRuntimeStub(target);
+    DefineSafepoint();
+    // Drop the parent RTT.
+    __ cache_state()->stack_state.pop_back(1);
+    __ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
   }
 
-  void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
-               Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "ref.test");
-  }
-  void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
-               Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "ref.cast");
-  }
-  void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
-                Value* result_on_branch, uint32_t depth) {
-    // Before branching, materialize all constants. This avoids repeatedly
-    // materializing them for each conditional branch.
-    if (depth != decoder->control_depth() - 1) {
-      __ MaterializeMergedConstants(
-          decoder->control_at(depth)->br_merge()->arity);
-    }
-    Label branch, cont_false;
-    LiftoffRegList pinned;
+  // Falls through on match (=successful type check).
+  // Returns the register containing the object.
+  LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
+                               const Value& rtt, Label* no_match,
+                               LiftoffRegList pinned = {},
+                               Register opt_scratch = no_reg) {
+    Label match;
     LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
     LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
 
@@ -4160,35 +4167,36 @@ class LiftoffCompiler {
     bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
     bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
     if (i31_check_only) {
-      __ emit_smi_check(obj_reg.gp(), &cont_false,
+      __ emit_smi_check(obj_reg.gp(), no_match,
                         LiftoffAssembler::kJumpOnNotSmi);
-      // Emit no further code, just fall through to taking the branch.
+      // Emit no further code, just fall through to {match}.
     } else {
       // Reserve all temporary registers up front, so that the cache state
       // tracking doesn't get confused by the following conditional jumps.
-      LiftoffRegister tmp1 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+      LiftoffRegister tmp1 =
+          opt_scratch != no_reg
+              ? LiftoffRegister(opt_scratch)
+              : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
       LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
       if (obj_can_be_i31) {
         DCHECK(!rtt_is_i31);
-        __ emit_smi_check(obj_reg.gp(), &cont_false,
-                          LiftoffAssembler::kJumpOnSmi);
+        __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
       }
       if (obj.type.is_nullable()) {
         LoadNullValue(tmp1.gp(), pinned);
-        __ emit_cond_jump(kEqual, &cont_false, obj.type, obj_reg.gp(),
-                          tmp1.gp());
+        __ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
       }
 
       // At this point, the object is neither null nor an i31ref. Perform
       // a regular type check. Check for exact match first.
       __ LoadMap(tmp1.gp(), obj_reg.gp());
       // {tmp1} now holds the object's map.
-      __ emit_cond_jump(kEqual, &branch, rtt.type, tmp1.gp(), rtt_reg.gp());
+      __ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
 
       // If the object isn't guaranteed to be an array or struct, check that.
       // Subsequent code wouldn't handle e.g. funcrefs.
       if (!is_data_ref_type(obj.type, decoder->module_)) {
-        EmitDataRefCheck(tmp1.gp(), &cont_false, tmp2, pinned);
+        EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
       }
 
       // Constant-time subtyping check: load exactly one candidate RTT from the
@@ -4206,19 +4214,59 @@ class LiftoffCompiler {
       // Step 3: check the list's length.
       LiftoffRegister list_length = tmp2;
       __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
-      __ emit_i32_cond_jumpi(kUnsignedLessEqual, &cont_false, list_length.gp(),
+      __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
                              rtt.type.depth());
       // Step 4: load the candidate list slot into {tmp1}, and compare it.
       __ LoadTaggedPointer(
           tmp1.gp(), tmp1.gp(), no_reg,
           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
           pinned);
-      __ emit_cond_jump(kUnequal, &cont_false, rtt.type, tmp1.gp(),
-                        rtt_reg.gp());
-      // Fall through to taking the branch.
+      __ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
+      // Fall through to {match}.
+    }
+    __ bind(&match);
+    return obj_reg;
+  }
+
+  void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
+               Value* result_val) {
+    Label return_false, done;
+    LiftoffRegList pinned;
+    LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
+
+    SubtypeCheck(decoder, obj, rtt, &return_false, pinned, result.gp());
+
+    __ LoadConstant(result, WasmValue(1));
+    // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
+    __ emit_jump(&done);
+
+    __ bind(&return_false);
+    __ LoadConstant(result, WasmValue(0));
+    __ bind(&done);
+    __ PushRegister(kWasmI32, result);
+  }
+
+  void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
+               Value* result) {
+    Label* trap_label = AddOutOfLineTrap(decoder->position(),
+                                         WasmCode::kThrowWasmTrapIllegalCast);
+    LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, trap_label);
+    __ PushRegister(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
+                    obj_reg);
+  }
+
+  void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
+                Value* result_on_branch, uint32_t depth) {
+    // Before branching, materialize all constants. This avoids repeatedly
+    // materializing them for each conditional branch.
+    if (depth != decoder->control_depth() - 1) {
+      __ MaterializeMergedConstants(
+          decoder->control_at(depth)->br_merge()->arity);
     }
 
-    __ bind(&branch);
+    Label cont_false;
+    LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, &cont_false);
+
     __ PushRegister(rtt.type.is_bottom()
                         ? kWasmBottom
                         : ValueType::Ref(rtt.type.heap_type(), kNonNullable),
@@ -196,9 +196,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
                                          func_body, func_index_,
                                          for_debugging_, counters, detected);
       if (result.succeeded()) break;
-      // In --liftoff-only mode, we should have aborted the process
-      // on bailout, i.e. before getting here.
-      DCHECK(!FLAG_liftoff_only);
     }
 
     // If Liftoff failed, fall back to turbofan.
@@ -86,6 +86,7 @@ struct WasmModule;
   V(RecordWrite) \
   V(ToNumber) \
   V(WasmAllocateArrayWithRtt) \
+  V(WasmAllocateRtt) \
   V(WasmAllocateStructWithRtt)
 
 // Sorted, disjoint and non-overlapping memory regions. A region is of the
@@ -745,8 +745,9 @@ WASM_COMPILED_EXEC_TEST(WasmPackedArrayS) {
   tester.CheckResult(kF, static_cast<int16_t>(expected_outputs[3]), 3);
 }
 
-TEST(NewDefault) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(NewDefault) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte struct_type = tester.DefineStruct(
       {F(wasm::kWasmI32, true), F(wasm::kWasmF64, true), F(optref(0), true)});
   const byte array_type = tester.DefineArray(wasm::kWasmI32, true);
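
From here on the test changes all follow one pattern: TEST(...) becomes
WASM_COMPILED_EXEC_TEST(...) so that each GC test is instantiated for both
execution tiers, and FLAG_experimental_liftoff_extern_ref is set because the
tests rely on reference types. A rough sketch of what such a macro presumably
expands to (an assumed shape for illustration; the real macro lives in the
cctest harness):

    // Minimal stand-in for the cctest TEST macro, so the sketch compiles on
    // its own.
    #define TEST(name) void name()

    enum class TestExecutionTier { kLiftoff, kTurbofan };

    // Assumed expansion: declare the test body once, run it once per tier.
    #define WASM_COMPILED_EXEC_TEST(name)                    \
      void RunWasm_##name(TestExecutionTier execution_tier); \
      TEST(RunWasmLiftoff_##name) {                          \
        RunWasm_##name(TestExecutionTier::kLiftoff);         \
      }                                                      \
      TEST(RunWasmTurbofan_##name) {                         \
        RunWasm_##name(TestExecutionTier::kTurbofan);        \
      }                                                      \
      void RunWasm_##name(TestExecutionTier execution_tier)

    // Usage then mirrors the diff:
    WASM_COMPILED_EXEC_TEST(Example) {
      (void)execution_tier;  // the body builds and runs the module per tier
    }
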
@@ -873,8 +874,9 @@ TEST(BasicRTT) {
   tester.CheckResult(kRefCast, 43);
 }
 
-TEST(AnyRefRtt) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(AnyRefRtt) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
 
   ValueType any_rtt_0_type = ValueType::Rtt(HeapType::kAny, 0);
   FunctionSig sig_any_canon(1, 0, &any_rtt_0_type);
@@ -947,8 +949,10 @@ TEST(AnyRefRtt) {
   tester.CheckResult(kCheckAnyAgainstAny, 1);
 }
 
-TEST(ArrayNewMap) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
+
   const byte type_index = tester.DefineArray(kWasmI32, true);
 
   ValueType array_type = ValueType::Ref(type_index, kNonNullable);
@@ -1065,8 +1069,9 @@ TEST(CallRef) {
   tester.CheckResult(caller, 47, 5);
 }
 
-TEST(RefTestCastNull) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(RefTestCastNull) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
 
   const byte kRefTestNull = tester.DefineFunction(
@@ -1266,8 +1271,9 @@ TEST(CastsBenchmark) {
   tester.CheckResult(Main, (kListLength * (kListLength - 1) / 2) * kLoops);
 }
 
-TEST(GlobalInitReferencingGlobal) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(GlobalInitReferencingGlobal) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte from = tester.AddGlobal(kWasmI32, false, WasmInitExpr(42));
   const byte to =
       tester.AddGlobal(kWasmI32, false, WasmInitExpr::GlobalGet(from));
@@ -1280,8 +1286,9 @@ TEST(GlobalInitReferencingGlobal) {
   tester.CheckResult(func, 42);
 }
 
-TEST(IndirectNullSetManually) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(IndirectNullSetManually) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   byte sig_index = tester.DefineSignature(tester.sigs.i_i());
   tester.DefineTable(ValueType::Ref(sig_index, kNullable), 1, 1);
   byte func_index = tester.DefineFunction(