Reland "[wasm][liftoff][ia32][x64] Detect SIMD NaNs for fuzzing"

This is a reland of b0bcedccfd
Changes:
- Consistently use int32_t for max_steps and nondeterminism
- Skip SIMD tests on architectures that don't support SIMD

Original change's description:
> [wasm][liftoff][ia32][x64] Detect SIMD NaNs for fuzzing
>
> R=clemensb@chromium.org
>
> Bug: v8:11856
> Change-Id: I9764e3e2944690ed0883afdab20afd47fdd4acfa
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2979605
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75512}

Bug: v8:11856
Change-Id: I0a7858d1c21c0dfb961b9b2c3fa1074f9362886a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3001178
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75557}
commit 6f48b7b369 (parent 32328edd54)
Author: Thibaud Michaud
Date: 2021-07-02 17:53:09 +02:00
Committed by: V8 LUCI CQ

10 changed files with 199 additions and 63 deletions
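
Context for the hunks that follow (my one-paragraph summary; the snippet is an illustrative sketch, not part of the CL): fuzzing configurations compile functions with Liftoff and hand the compiler a pointer to an int32_t flag; the generated code ORs a non-zero value into that flag whenever a scalar float or, with this change, a SIMD float operation produces a NaN, so the fuzzer can skip cross-tier result comparison for inputs whose NaN bit patterns would otherwise show up as spurious mismatches.

#include <cstdint>

// Sketch of the fuzzer-side view of the flag (names are illustrative, not the
// real fuzzer driver). The flag's address is baked into the Liftoff code via
// LoadConstant, see CheckS128Nan in the liftoff-compiler.cc hunks below.
int32_t nondeterminism = 0;

bool SafeToCompareTiers() {
  return nondeterminism == 0;  // non-zero: some (SIMD) float op produced a NaN
}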

@@ -4239,6 +4239,13 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
Register tmp_gp,
DoubleRegister tmp_fp,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();

@@ -3242,6 +3242,13 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
Register tmp_gp,
DoubleRegister tmp_fp,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
// The stack pointer is required to be quadword aligned.

@@ -4822,6 +4822,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
Register tmp_gp,
DoubleRegister tmp_fp,
ValueKind lane_kind) {
if (lane_kind == kF32) {
movaps(tmp_fp, src);
cmpunordps(tmp_fp, tmp_fp);
} else {
DCHECK_EQ(lane_kind, kF64);
movapd(tmp_fp, src);
cmpunordpd(tmp_fp, tmp_fp);
}
pmovmskb(tmp_gp, tmp_fp);
or_(Operand(dst, 0), tmp_gp);
}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
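
For readers less familiar with the SSE idiom above (this ia32 hunk and its x64 twin near the end of the diff): cmpunordps/cmpunordpd compare each lane with itself, which is "unordered" exactly when the lane is NaN, so NaN lanes become all-ones; pmovmskb then packs the top bit of every byte into a general-purpose register, and the final or makes the write to the flag at [dst] sticky. A hedged scalar sketch of the f32x4 case follows (illustrative C++, not V8 code; the f64x2 path is analogous with eight mask bits per lane):

#include <cmath>
#include <cstdint>

void S128SetIfNanF32x4(int32_t* dst, const float lanes[4]) {
  uint32_t mask = 0;
  for (int i = 0; i < 4; ++i) {
    // cmpunordps: lane i becomes all-ones iff lanes[i] is NaN;
    // pmovmskb: that contributes four set bits (one per byte) to the mask.
    if (std::isnan(lanes[i])) mask |= 0xFu << (4 * i);
  }
  *dst |= static_cast<int32_t>(mask);  // or_(Operand(dst, 0), tmp_gp)
}

int main() {
  int32_t flag = 0;
  const float lanes[4] = {1.0f, std::nanf(""), 3.0f, 4.0f};
  S128SetIfNanF32x4(&flag, lanes);
  return flag == 0x00F0 ? 0 : 1;  // only lane 1's four mask bits are set
}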

@@ -1458,6 +1458,11 @@ class LiftoffAssembler : public TurboAssembler {
// Set the i32 at address dst to 1 if src is a NaN.
inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
// Set the i32 at address dst to a non-zero value if src contains a NaN.
inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
Register tmp_gp, DoubleRegister tmp_fp,
ValueKind lane_kind);
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////

@@ -458,8 +458,8 @@ class LiftoffCompiler {
DebugSideTableBuilder* debug_sidetable_builder,
ForDebugging for_debugging, int func_index,
base::Vector<const int> breakpoints = {},
int dead_breakpoint = 0, int* max_steps = nullptr,
bool* nondeterminism = nullptr)
int dead_breakpoint = 0, int32_t* max_steps = nullptr,
int32_t* nondeterminism = nullptr)
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -1454,7 +1454,8 @@ class LiftoffCompiler {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
}
template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
template <ValueKind src_kind, ValueKind result_kind,
ValueKind result_lane_kind = kVoid, class EmitFn>
void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_kind);
constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -1463,9 +1464,14 @@
? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
if (V8_UNLIKELY(nondeterminism_) &&
(result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64)) {
CheckNan(dst, LiftoffRegList::ForRegs(src, dst), result_kind);
if (V8_UNLIKELY(nondeterminism_)) {
auto pinned = LiftoffRegList::ForRegs(dst);
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
(result_lane_kind == kF32 || result_lane_kind == kF64)) {
CheckS128Nan(dst, pinned, result_lane_kind);
}
}
__ PushRegister(result_kind, dst);
}
@@ -1696,7 +1702,8 @@ }
}
template <ValueKind src_kind, ValueKind result_kind,
bool swap_lhs_rhs = false, typename EmitFn>
bool swap_lhs_rhs = false, ValueKind result_lane_kind = kVoid,
typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -1709,9 +1716,14 @@
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
if (V8_UNLIKELY(nondeterminism_) &&
(result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64)) {
CheckNan(dst, LiftoffRegList::ForRegs(rhs, lhs, dst), result_kind);
if (V8_UNLIKELY(nondeterminism_)) {
auto pinned = LiftoffRegList::ForRegs(dst);
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
(result_lane_kind == kF32 || result_lane_kind == kF64)) {
CheckS128Nan(dst, pinned, result_lane_kind);
}
}
__ PushRegister(result_kind, dst);
}
@@ -3293,7 +3305,8 @@
__ bind(&cont_false);
}
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
template <ValueKind src_kind, ValueKind result_kind,
ValueKind result_lane_kind = kVoid, typename EmitFn>
void EmitTerOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -3309,10 +3322,15 @@
LiftoffRegList::ForRegs(src1, src2))
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
if (V8_UNLIKELY(nondeterminism_) &&
(result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64)) {
CheckNan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
result_kind);
if (V8_UNLIKELY(nondeterminism_)) {
auto pinned = LiftoffRegList::ForRegs(dst);
if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
CheckNan(dst, pinned, result_kind);
} else if (result_kind == ValueKind::kS128 &&
(result_lane_kind == kF32 || result_lane_kind == kF64)) {
CheckS128Nan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
result_lane_kind);
}
}
__ PushRegister(result_kind, dst);
}
@@ -3342,6 +3360,7 @@
}
}
template <ValueKind result_lane_kind>
void EmitSimdFloatRoundingOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
ExternalReference (*ext_ref)()) {
@@ -3353,6 +3372,10 @@
auto sig_v_s = MakeSig::Params(kS128);
GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
if (V8_UNLIKELY(nondeterminism_)) {
auto pinned = LiftoffRegList::ForRegs(dst);
CheckS128Nan(dst, pinned, result_lane_kind);
}
__ PushRegister(kS128, dst);
}
@@ -3375,9 +3398,9 @@
case wasm::kExprI64x2Splat:
return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat);
case wasm::kExprF32x4Splat:
return EmitUnOp<kF32, kS128>(&LiftoffAssembler::emit_f32x4_splat);
return EmitUnOp<kF32, kS128, kF32>(&LiftoffAssembler::emit_f32x4_splat);
case wasm::kExprF64x2Splat:
return EmitUnOp<kF64, kS128>(&LiftoffAssembler::emit_f64x2_splat);
return EmitUnOp<kF64, kS128, kF64>(&LiftoffAssembler::emit_f64x2_splat);
case wasm::kExprI8x16Eq:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq);
case wasm::kExprI8x16Ne:
@@ -3689,92 +3712,108 @@
return EmitUnOp<kS128, kS128>(
&LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
case wasm::kExprF32x4Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_abs);
case wasm::kExprF32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_neg);
return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_neg);
case wasm::kExprF32x4Sqrt:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sqrt);
return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_sqrt);
case wasm::kExprF32x4Ceil:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_ceil,
&ExternalReference::wasm_f32x4_ceil);
case wasm::kExprF32x4Floor:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_floor,
ExternalReference::wasm_f32x4_floor);
case wasm::kExprF32x4Trunc:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_trunc,
ExternalReference::wasm_f32x4_trunc);
case wasm::kExprF32x4NearestInt:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF32>(
&LiftoffAssembler::emit_f32x4_nearest_int,
ExternalReference::wasm_f32x4_nearest_int);
case wasm::kExprF32x4Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_add);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_add);
case wasm::kExprF32x4Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sub);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_sub);
case wasm::kExprF32x4Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_mul);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_mul);
case wasm::kExprF32x4Div:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_div);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_div);
case wasm::kExprF32x4Min:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_min);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_min);
case wasm::kExprF32x4Max:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_max);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_max);
case wasm::kExprF32x4Pmin:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_pmin);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_pmin);
case wasm::kExprF32x4Pmax:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_pmax);
return EmitBinOp<kS128, kS128, false, kF32>(
&LiftoffAssembler::emit_f32x4_pmax);
case wasm::kExprF64x2Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_abs);
return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_abs);
case wasm::kExprF64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_neg);
return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_neg);
case wasm::kExprF64x2Sqrt:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sqrt);
return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_sqrt);
case wasm::kExprF64x2Ceil:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_ceil,
&ExternalReference::wasm_f64x2_ceil);
case wasm::kExprF64x2Floor:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_floor,
ExternalReference::wasm_f64x2_floor);
case wasm::kExprF64x2Trunc:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_trunc,
ExternalReference::wasm_f64x2_trunc);
case wasm::kExprF64x2NearestInt:
return EmitSimdFloatRoundingOpWithCFallback(
return EmitSimdFloatRoundingOpWithCFallback<kF64>(
&LiftoffAssembler::emit_f64x2_nearest_int,
ExternalReference::wasm_f64x2_nearest_int);
case wasm::kExprF64x2Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_add);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_add);
case wasm::kExprF64x2Sub:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sub);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_sub);
case wasm::kExprF64x2Mul:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_mul);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_mul);
case wasm::kExprF64x2Div:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_div);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_div);
case wasm::kExprF64x2Min:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_min);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_min);
case wasm::kExprF64x2Max:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_max);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_max);
case wasm::kExprF64x2Pmin:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_pmin);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_pmin);
case wasm::kExprF64x2Pmax:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_pmax);
return EmitBinOp<kS128, kS128, false, kF64>(
&LiftoffAssembler::emit_f64x2_pmax);
case wasm::kExprI32x4SConvertF32x4:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_i32x4_sconvert_f32x4);
case wasm::kExprI32x4UConvertF32x4:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_i32x4_uconvert_f32x4);
case wasm::kExprF32x4SConvertI32x4:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_f32x4_sconvert_i32x4);
case wasm::kExprF32x4UConvertI32x4:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_f32x4_uconvert_i32x4);
case wasm::kExprI8x16SConvertI16x8:
return EmitBinOp<kS128, kS128>(
@@ -3829,16 +3868,16 @@
case wasm::kExprI64x2Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
case wasm::kExprF64x2ConvertLowI32x4S:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF64>(
&LiftoffAssembler::emit_f64x2_convert_low_i32x4_s);
case wasm::kExprF64x2ConvertLowI32x4U:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF64>(
&LiftoffAssembler::emit_f64x2_convert_low_i32x4_u);
case wasm::kExprF64x2PromoteLowF32x4:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF64>(
&LiftoffAssembler::emit_f64x2_promote_low_f32x4);
case wasm::kExprF32x4DemoteF64x2Zero:
return EmitUnOp<kS128, kS128>(
return EmitUnOp<kS128, kS128, kF32>(
&LiftoffAssembler::emit_f32x4_demote_f64x2_zero);
case wasm::kExprI32x4TruncSatF64x2SZero:
return EmitUnOp<kS128, kS128>(
@@ -6107,6 +6146,20 @@
__ emit_set_if_nan(nondeterminism_addr.gp(), src.fp(), kind);
}
void CheckS128Nan(LiftoffRegister dst, LiftoffRegList pinned,
ValueKind lane_kind) {
RegClass rc = reg_class_for(kS128);
LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
LiftoffRegister nondeterminism_addr =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
nondeterminism_addr,
WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
__ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
tmp_fp.fp(), lane_kind);
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
// MVP:
@@ -6166,8 +6219,8 @@
// Current number of exception refs on the stack.
int num_exceptions_ = 0;
int* max_steps_;
bool* nondeterminism_;
int32_t* max_steps_;
int32_t* nondeterminism_;
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
@@ -6224,7 +6277,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
ForDebugging for_debugging, Counters* counters, WasmFeatures* detected,
base::Vector<const int> breakpoints,
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint,
int* max_steps, bool* nondeterminism) {
int32_t* max_steps, int32_t* nondeterminism) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileBaseline", "funcIndex", func_index, "bodySize",

@@ -58,7 +58,7 @@ V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
Counters*, WasmFeatures* detected_features,
base::Vector<const int> breakpoints = {},
std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0,
int* max_steps = nullptr, bool* nondeterminism = nullptr);
int32_t* max_steps = nullptr, int32_t* nondeterminism = nullptr);
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
const WasmCode*);

@@ -4371,6 +4371,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
Register tmp_gp,
DoubleRegister tmp_fp,
ValueKind lane_kind) {
if (lane_kind == kF32) {
movaps(tmp_fp, src);
cmpunordps(tmp_fp, tmp_fp);
} else {
DCHECK_EQ(lane_kind, kF64);
movapd(tmp_fp, src);
cmpunordpd(tmp_fp, tmp_fp);
}
pmovmskb(tmp_gp, tmp_fp);
orl(Operand(dst, 0), tmp_gp);
}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();

@@ -773,6 +773,8 @@
'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
'test-run-wasm/RunWasmLiftoff_Select_s128_parameters': [SKIP],
'test-run-wasm/RunWasmTurbofan_Select_s128_parameters': [SKIP],
'test-liftoff-for-fuzzing/NondeterminismUnopF32x4': [SKIP],
'test-liftoff-for-fuzzing/NondeterminismUnopF64x2': [SKIP],
}], # no_simd_hardware == True
################################################################################

@@ -40,6 +40,36 @@ TEST(NondeterminismUnopF64) {
CHECK(r.HasNondeterminism());
}
TEST(NondeterminismUnopF32x4) {
WasmRunner<int32_t, float> r(TestExecutionTier::kLiftoffForFuzzing);
byte value = 0;
BUILD(r,
WASM_SIMD_UNOP(kExprF32x4Ceil,
WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
kExprDrop, WASM_ONE);
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, 0.0);
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, std::nanf(""));
CHECK(r.HasNondeterminism());
}
TEST(NondeterminismUnopF64x2) {
WasmRunner<int32_t, double> r(TestExecutionTier::kLiftoffForFuzzing);
byte value = 0;
BUILD(r,
WASM_SIMD_UNOP(kExprF64x2Ceil,
WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
kExprDrop, WASM_ONE);
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, 0.0);
CHECK(!r.HasNondeterminism());
r.CheckCallViaJS(1, std::nan(""));
CHECK(r.HasNondeterminism());
}
TEST(NondeterminismBinop) {
WasmRunner<float> r(TestExecutionTier::kLiftoffForFuzzing);

@@ -264,8 +264,8 @@ class TestingModuleBuilder {
void set_max_steps(int n) { max_steps_ = n; }
int* max_steps_ptr() { return &max_steps_; }
bool nondeterminism() { return nondeterminism_; }
bool* non_determinism_ptr() { return &nondeterminism_; }
int32_t nondeterminism() { return nondeterminism_; }
int32_t* non_determinism_ptr() { return &nondeterminism_; }
void EnableFeature(WasmFeature feature) { enabled_features_.Add(feature); }
@@ -282,8 +282,8 @@ Handle<WasmInstanceObject> instance_object_;
Handle<WasmInstanceObject> instance_object_;
NativeModule* native_module_ = nullptr;
RuntimeExceptionSupport runtime_exception_support_;
int max_steps_ = kMaxNumSteps;
bool nondeterminism_ = false;
int32_t max_steps_ = kMaxNumSteps;
int32_t nondeterminism_ = 0;
// Data segment arrays that are normally allocated on the instance.
std::vector<byte> data_segment_data_;
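
This last hunk is the test-harness half of the reland's "Consistently use int32_t" change: the generated emit_s128_set_if_nan code ORs the full pmovmskb mask into the flag through a 32-bit memory operand (orl / or_ above), so the field the harness exposes, and the pointer it hands to ExecuteLiftoffCompilation, must really be 32 bits wide rather than a bool. A sketch of that harness-side contract, using an illustrative stand-in class rather than the real TestingModuleBuilder:

#include <cstdint>

class TestingModuleBuilderSketch {  // illustrative stand-in, not V8 code
 public:
  int32_t nondeterminism() const { return nondeterminism_; }
  int32_t* non_determinism_ptr() { return &nondeterminism_; }  // given to Liftoff

 private:
  int32_t nondeterminism_ = 0;  // was bool; the generated 32-bit OR needs a
                                // full int32_t to land in, and any non-zero
                                // value (e.g. 0x00F0) means "NaN was seen"
};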