From 6f48b7b369188a16920d6141527067ea6ff76065 Mon Sep 17 00:00:00 2001
From: Thibaud Michaud <thibaudm@chromium.org>
Date: Fri, 2 Jul 2021 17:53:09 +0200
Subject: [PATCH] Reland "[wasm][liftoff][ia32][x64] Detect SIMD NaNs for
 fuzzing"

This is a reland of b0bcedccfd1a4ac2fd278944544ea28f64205e8f

Changes:
- Consistently use int32_t for max_steps and nondeterminism
- Skip SIMD tests on architectures that don't support it

Original change's description:
> [wasm][liftoff][ia32][x64] Detect SIMD NaNs for fuzzing
>
> R=clemensb@chromium.org
>
> Bug: v8:11856
> Change-Id: I9764e3e2944690ed0883afdab20afd47fdd4acfa
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2979605
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75512}

Bug: v8:11856
Change-Id: I0a7858d1c21c0dfb961b9b2c3fa1074f9362886a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3001178
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75557}
---
 src/wasm/baseline/arm/liftoff-assembler-arm.h |   7 +
 .../baseline/arm64/liftoff-assembler-arm64.h  |   7 +
 .../baseline/ia32/liftoff-assembler-ia32.h    |  16 ++
 src/wasm/baseline/liftoff-assembler.h         |   5 +
 src/wasm/baseline/liftoff-compiler.cc         | 169 ++++++++++++------
 src/wasm/baseline/liftoff-compiler.h          |   2 +-
 src/wasm/baseline/x64/liftoff-assembler-x64.h |  16 ++
 test/cctest/cctest.status                     |   2 +
 test/cctest/wasm/test-liftoff-for-fuzzing.cc  |  30 ++++
 test/cctest/wasm/wasm-run-utils.h             |   8 +-
 10 files changed, 199 insertions(+), 63 deletions(-)

diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 7f0b784b65..e2bd64c88f 100644
--- a/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -4239,6 +4239,13 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
   UNIMPLEMENTED();
 }
 
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+                                            Register tmp_gp,
+                                            DoubleRegister tmp_fp,
+                                            ValueKind lane_kind) {
+  UNIMPLEMENTED();
+}
+
 void LiftoffStackSlots::Construct(int param_slots) {
   DCHECK_LT(0, slots_.size());
   SortInPushOrder();
diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 7ac2ed7bab..e5694f758f 100644
--- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -3242,6 +3242,13 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
   UNIMPLEMENTED();
 }
 
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+                                            Register tmp_gp,
+                                            DoubleRegister tmp_fp,
+                                            ValueKind lane_kind) {
+  UNIMPLEMENTED();
+}
+
 void LiftoffStackSlots::Construct(int param_slots) {
   DCHECK_LT(0, slots_.size());
   // The stack pointer is required to be quadword aligned.
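Note on the contract the per-architecture implementations below have to satisfy: emit_s128_set_if_nan must OR a non-zero value into the i32 "nondeterminism" cell at dst whenever any f32 or f64 lane of the SIMD input is NaN (NaN is the only value for which x != x). A minimal, portable C++ sketch of that check, for illustration only — LaneKind and SetIfS128Nan are invented names, not part of the patch:

#include <cstdint>
#include <cstring>

enum class LaneKind { kF32, kF64 };

// Model of emit_s128_set_if_nan: OR a non-zero value into *flag if any
// f32/f64 lane of the 16-byte vector is NaN. The self-comparison
// lane != lane holds exactly for NaN, mirroring an unordered compare.
void SetIfS128Nan(int32_t* flag, const uint8_t vec[16], LaneKind lane_kind) {
  if (lane_kind == LaneKind::kF32) {
    for (int i = 0; i < 4; ++i) {
      float lane;
      std::memcpy(&lane, vec + i * sizeof(float), sizeof(float));
      if (lane != lane) *flag |= 1;
    }
  } else {
    for (int i = 0; i < 2; ++i) {
      double lane;
      std::memcpy(&lane, vec + i * sizeof(double), sizeof(double));
      if (lane != lane) *flag |= 1;
    }
  }
}

Only "zero vs. non-zero" matters to the caller, so the fuzzer can compare the cell across execution tiers without caring which particular lanes were NaN.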
diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 19ba740ef5..d29963dea1 100644
--- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -4822,6 +4822,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
   bind(&ret);
 }
 
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+                                            Register tmp_gp,
+                                            DoubleRegister tmp_fp,
+                                            ValueKind lane_kind) {
+  if (lane_kind == kF32) {
+    movaps(tmp_fp, src);
+    cmpunordps(tmp_fp, tmp_fp);
+  } else {
+    DCHECK_EQ(lane_kind, kF64);
+    movapd(tmp_fp, src);
+    cmpunordpd(tmp_fp, tmp_fp);
+  }
+  pmovmskb(tmp_gp, tmp_fp);
+  or_(Operand(dst, 0), tmp_gp);
+}
+
 void LiftoffStackSlots::Construct(int param_slots) {
   DCHECK_LT(0, slots_.size());
   SortInPushOrder();
diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
index 7042ffa2e1..acaa40546d 100644
--- a/src/wasm/baseline/liftoff-assembler.h
+++ b/src/wasm/baseline/liftoff-assembler.h
@@ -1458,6 +1458,11 @@ class LiftoffAssembler : public TurboAssembler {
   // Set the i32 at address dst to 1 if src is a NaN.
   inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
 
+  // Set the i32 at address dst to a non-zero value if src contains a NaN.
+  inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
+                                   Register tmp_gp, DoubleRegister tmp_fp,
+                                   ValueKind lane_kind);
+
   ////////////////////////////////////
   // End of platform-specific part. //
   ////////////////////////////////////
diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
index 347d88b553..52ceb323e7 100644
--- a/src/wasm/baseline/liftoff-compiler.cc
+++ b/src/wasm/baseline/liftoff-compiler.cc
@@ -458,8 +458,8 @@ class LiftoffCompiler {
                   DebugSideTableBuilder* debug_sidetable_builder,
                   ForDebugging for_debugging, int func_index,
                   base::Vector<const int> breakpoints = {},
-                  int dead_breakpoint = 0, int* max_steps = nullptr,
-                  bool* nondeterminism = nullptr)
+                  int dead_breakpoint = 0, int32_t* max_steps = nullptr,
+                  int32_t* nondeterminism = nullptr)
       : asm_(std::move(buffer)),
         descriptor_(
             GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -1454,7 +1454,8 @@
     CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
   }
 
-  template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
+  template <ValueKind src_kind, ValueKind result_kind,
+            ValueKind result_lane_kind = kVoid, typename EmitFn>
   void EmitUnOp(EmitFn fn) {
     constexpr RegClass src_rc = reg_class_for(src_kind);
     constexpr RegClass result_rc = reg_class_for(result_kind);
     LiftoffRegister src = __ PopToRegister();
     LiftoffRegister dst = src_rc == result_rc
                               ? __ GetUnusedRegister(result_rc, {src}, {})
                               : __ GetUnusedRegister(result_rc, {});
     CallEmitFn(fn, dst, src);
-    if (V8_UNLIKELY(nondeterminism_) &&
-        (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64)) {
-      CheckNan(dst, LiftoffRegList::ForRegs(src, dst), result_kind);
+    if (V8_UNLIKELY(nondeterminism_)) {
+      auto pinned = LiftoffRegList::ForRegs(dst);
+      if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
+        CheckNan(dst, pinned, result_kind);
+      } else if (result_kind == ValueKind::kS128 &&
+                 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
+        CheckS128Nan(dst, pinned, result_lane_kind);
+      }
     }
     __ PushRegister(result_kind, dst);
   }
@@ -1696,7 +1702,8 @@
   }
 
   template <ValueKind src_kind, ValueKind result_kind,
-            bool swap_lhs_rhs = false, typename EmitFn>
+            bool swap_lhs_rhs = false, ValueKind result_lane_kind = kVoid,
+            typename EmitFn>
   void EmitBinOp(EmitFn fn) {
     static constexpr RegClass src_rc = reg_class_for(src_kind);
     static constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -1709,9 +1716,14 @@
     if (swap_lhs_rhs) std::swap(lhs, rhs);
 
     CallEmitFn(fn, dst, lhs, rhs);
-    if (V8_UNLIKELY(nondeterminism_) &&
-        (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64)) {
-      CheckNan(dst, LiftoffRegList::ForRegs(rhs, lhs, dst), result_kind);
+    if (V8_UNLIKELY(nondeterminism_)) {
+      auto pinned = LiftoffRegList::ForRegs(dst);
+      if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
+        CheckNan(dst, pinned, result_kind);
+      } else if (result_kind == ValueKind::kS128 &&
+                 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
+        CheckS128Nan(dst, pinned, result_lane_kind);
+      }
     }
     __ PushRegister(result_kind, dst);
   }
@@ -3293,7 +3305,8 @@
     __ bind(&cont_false);
   }
 
-  template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
+  template <ValueKind src_kind, ValueKind result_kind,
+            ValueKind result_lane_kind = kVoid, typename EmitFn>
   void EmitTerOp(EmitFn fn) {
     static constexpr RegClass src_rc = reg_class_for(src_kind);
     static constexpr RegClass result_rc = reg_class_for(result_kind);
@@ -3309,10 +3322,15 @@
                                    LiftoffRegList::ForRegs(src1, src2))
             : __ GetUnusedRegister(result_rc, {});
     CallEmitFn(fn, dst, src1, src2, src3);
-    if (V8_UNLIKELY(nondeterminism_) &&
-        (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64)) {
-      CheckNan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
-               result_kind);
+    if (V8_UNLIKELY(nondeterminism_)) {
+      auto pinned = LiftoffRegList::ForRegs(dst);
+      if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
+        CheckNan(dst, pinned, result_kind);
+      } else if (result_kind == ValueKind::kS128 &&
+                 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
+        CheckS128Nan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
+                     result_lane_kind);
+      }
     }
     __ PushRegister(result_kind, dst);
   }
@@ -3342,6 +3360,7 @@
     }
   }
 
+  template <ValueKind result_lane_kind>
   void EmitSimdFloatRoundingOpWithCFallback(
       bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
       ExternalReference (*ext_ref)()) {
@@ -3353,6 +3372,10 @@
       auto sig_v_s = MakeSig::Params(kS128);
       GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
     }
+    if (V8_UNLIKELY(nondeterminism_)) {
+      auto pinned = LiftoffRegList::ForRegs(dst);
+      CheckS128Nan(dst, pinned, result_lane_kind);
+    }
     __ PushRegister(kS128, dst);
   }
@@ -3375,9 +3398,9 @@
       case wasm::kExprI64x2Splat:
         return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat);
       case wasm::kExprF32x4Splat:
-        return EmitUnOp<kF32, kS128>(&LiftoffAssembler::emit_f32x4_splat);
+        return EmitUnOp<kF32, kS128, kF32>(&LiftoffAssembler::emit_f32x4_splat);
       case wasm::kExprF64x2Splat:
-        return EmitUnOp<kF64, kS128>(&LiftoffAssembler::emit_f64x2_splat);
+        return EmitUnOp<kF64, kS128, kF64>(&LiftoffAssembler::emit_f64x2_splat);
       case wasm::kExprI8x16Eq:
         return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq);
       case wasm::kExprI8x16Ne:
@@ -3689,92 +3712,108 @@
         return EmitUnOp<kS128, kS128>(
             &LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
       case wasm::kExprF32x4Abs:
-        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
+        return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_abs);
       case wasm::kExprF32x4Neg:
-        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_neg);
+        return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_neg);
       case wasm::kExprF32x4Sqrt:
-        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sqrt);
+        return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_sqrt);
       case wasm::kExprF32x4Ceil:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
            &LiftoffAssembler::emit_f32x4_ceil,
            &ExternalReference::wasm_f32x4_ceil);
       case wasm::kExprF32x4Floor:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
            &LiftoffAssembler::emit_f32x4_floor,
            ExternalReference::wasm_f32x4_floor);
       case wasm::kExprF32x4Trunc:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
            &LiftoffAssembler::emit_f32x4_trunc,
            ExternalReference::wasm_f32x4_trunc);
       case wasm::kExprF32x4NearestInt:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF32>(
            &LiftoffAssembler::emit_f32x4_nearest_int,
            ExternalReference::wasm_f32x4_nearest_int);
       case wasm::kExprF32x4Add:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_add);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_add);
       case wasm::kExprF32x4Sub:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_sub);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_sub);
       case wasm::kExprF32x4Mul:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_mul);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_mul);
       case wasm::kExprF32x4Div:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_div);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_div);
       case wasm::kExprF32x4Min:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_min);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_min);
       case wasm::kExprF32x4Max:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_max);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_max);
       case wasm::kExprF32x4Pmin:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_pmin);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_pmin);
       case wasm::kExprF32x4Pmax:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_pmax);
+        return EmitBinOp<kS128, kS128, false, kF32>(
+            &LiftoffAssembler::emit_f32x4_pmax);
       case wasm::kExprF64x2Abs:
-        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_abs);
+        return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_abs);
       case wasm::kExprF64x2Neg:
-        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_neg);
+        return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_neg);
       case wasm::kExprF64x2Sqrt:
-        return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sqrt);
+        return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_sqrt);
       case wasm::kExprF64x2Ceil:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
            &LiftoffAssembler::emit_f64x2_ceil,
            &ExternalReference::wasm_f64x2_ceil);
       case wasm::kExprF64x2Floor:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
            &LiftoffAssembler::emit_f64x2_floor,
            ExternalReference::wasm_f64x2_floor);
       case wasm::kExprF64x2Trunc:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
            &LiftoffAssembler::emit_f64x2_trunc,
            ExternalReference::wasm_f64x2_trunc);
       case wasm::kExprF64x2NearestInt:
-        return EmitSimdFloatRoundingOpWithCFallback(
+        return EmitSimdFloatRoundingOpWithCFallback<kF64>(
            &LiftoffAssembler::emit_f64x2_nearest_int,
            ExternalReference::wasm_f64x2_nearest_int);
       case wasm::kExprF64x2Add:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_add);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_add);
       case wasm::kExprF64x2Sub:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_sub);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_sub);
       case wasm::kExprF64x2Mul:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_mul);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_mul);
       case wasm::kExprF64x2Div:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_div);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_div);
       case wasm::kExprF64x2Min:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_min);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_min);
       case wasm::kExprF64x2Max:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_max);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_max);
       case wasm::kExprF64x2Pmin:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_pmin);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_pmin);
       case wasm::kExprF64x2Pmax:
-        return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_pmax);
+        return EmitBinOp<kS128, kS128, false, kF64>(
+            &LiftoffAssembler::emit_f64x2_pmax);
       case wasm::kExprI32x4SConvertF32x4:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF32>(
            &LiftoffAssembler::emit_i32x4_sconvert_f32x4);
       case wasm::kExprI32x4UConvertF32x4:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF32>(
            &LiftoffAssembler::emit_i32x4_uconvert_f32x4);
       case wasm::kExprF32x4SConvertI32x4:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF32>(
            &LiftoffAssembler::emit_f32x4_sconvert_i32x4);
       case wasm::kExprF32x4UConvertI32x4:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF32>(
            &LiftoffAssembler::emit_f32x4_uconvert_i32x4);
       case wasm::kExprI8x16SConvertI16x8:
         return EmitBinOp<kS128, kS128>(
            &LiftoffAssembler::emit_i8x16_sconvert_i16x8);
@@ -3829,16 +3868,16 @@
       case wasm::kExprI64x2Abs:
         return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
       case wasm::kExprF64x2ConvertLowI32x4S:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF64>(
            &LiftoffAssembler::emit_f64x2_convert_low_i32x4_s);
       case wasm::kExprF64x2ConvertLowI32x4U:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF64>(
            &LiftoffAssembler::emit_f64x2_convert_low_i32x4_u);
       case wasm::kExprF64x2PromoteLowF32x4:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF64>(
            &LiftoffAssembler::emit_f64x2_promote_low_f32x4);
       case wasm::kExprF32x4DemoteF64x2Zero:
-        return EmitUnOp<kS128, kS128>(
+        return EmitUnOp<kS128, kS128, kF32>(
            &LiftoffAssembler::emit_f32x4_demote_f64x2_zero);
       case wasm::kExprI32x4TruncSatF64x2SZero:
         return EmitUnOp<kS128, kS128>(
@@ -6107,6 +6146,20 @@
     __ emit_set_if_nan(nondeterminism_addr.gp(), src.fp(), kind);
   }
 
+  void CheckS128Nan(LiftoffRegister dst, LiftoffRegList pinned,
+                    ValueKind lane_kind) {
+    RegClass rc = reg_class_for(kS128);
+    LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+    LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+    LiftoffRegister nondeterminism_addr =
+        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+    __ LoadConstant(
+        nondeterminism_addr,
+        WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
+    __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
+                            tmp_fp.fp(), lane_kind);
+  }
+
   static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
   static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
       // MVP:
@@ -6166,8 +6219,8 @@
   // Current number of exception refs on the stack.
   int num_exceptions_ = 0;
 
-  int* max_steps_;
-  bool* nondeterminism_;
+  int32_t* max_steps_;
+  int32_t* nondeterminism_;
 
   bool has_outstanding_op() const {
     return outstanding_op_ != kNoOutstandingOp;
@@ -6224,7 +6277,7 @@ WasmCompilationResult ExecuteLiftoffCompilation(
     ForDebugging for_debugging, Counters* counters, WasmFeatures* detected,
     base::Vector<const int> breakpoints,
     std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint,
-    int* max_steps, bool* nondeterminism) {
+    int32_t* max_steps, int32_t* nondeterminism) {
   int func_body_size = static_cast<int>(func_body.end - func_body.start);
   TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
                "wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
diff --git a/src/wasm/baseline/liftoff-compiler.h b/src/wasm/baseline/liftoff-compiler.h
index 25187aa010..e01d617ea4 100644
--- a/src/wasm/baseline/liftoff-compiler.h
+++ b/src/wasm/baseline/liftoff-compiler.h
@@ -58,7 +58,7 @@ V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
     Counters*, WasmFeatures* detected_features,
     base::Vector<const int> breakpoints = {},
     std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0,
-    int* max_steps = nullptr, bool* nondeterminism = nullptr);
+    int32_t* max_steps = nullptr, int32_t* nondeterminism = nullptr);
 
 V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
     const WasmCode*);
diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h
index fdca78d201..0744d2e09b 100644
--- a/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -4371,6 +4371,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
   bind(&ret);
 }
 
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+                                            Register tmp_gp,
+                                            DoubleRegister tmp_fp,
+                                            ValueKind lane_kind) {
+  if (lane_kind == kF32) {
+    movaps(tmp_fp, src);
+    cmpunordps(tmp_fp, tmp_fp);
+  } else {
+    DCHECK_EQ(lane_kind, kF64);
+    movapd(tmp_fp, src);
+    cmpunordpd(tmp_fp, tmp_fp);
+  }
+  pmovmskb(tmp_gp, tmp_fp);
+  orl(Operand(dst, 0), tmp_gp);
+}
+
 void LiftoffStackSlots::Construct(int param_slots) {
   DCHECK_LT(0, slots_.size());
   SortInPushOrder();
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 7ec3f72cd0..88cd740068 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -773,6 +773,8 @@
   'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
   'test-run-wasm/RunWasmLiftoff_Select_s128_parameters': [SKIP],
   'test-run-wasm/RunWasmTurbofan_Select_s128_parameters': [SKIP],
+  'test-liftoff-for-fuzzing/NondeterminismUnopF32x4': [SKIP],
+  'test-liftoff-for-fuzzing/NondeterminismUnopF64x2': [SKIP],
 }],  # no_simd_hardware == True
 
 ################################################################################
diff --git a/test/cctest/wasm/test-liftoff-for-fuzzing.cc b/test/cctest/wasm/test-liftoff-for-fuzzing.cc
index 432c2761a4..ae168efda8 100644
--- a/test/cctest/wasm/test-liftoff-for-fuzzing.cc
+++ b/test/cctest/wasm/test-liftoff-for-fuzzing.cc
@@ -40,6 +40,36 @@ TEST(NondeterminismUnopF64) {
   CHECK(r.HasNondeterminism());
 }
 
+TEST(NondeterminismUnopF32x4) {
+  WasmRunner<int32_t, float> r(TestExecutionTier::kLiftoffForFuzzing);
+
+  byte value = 0;
+  BUILD(r,
+        WASM_SIMD_UNOP(kExprF32x4Ceil,
+                       WASM_SIMD_F32x4_SPLAT(WASM_LOCAL_GET(value))),
+        kExprDrop, WASM_ONE);
+  CHECK(!r.HasNondeterminism());
+  r.CheckCallViaJS(1, 0.0);
+  CHECK(!r.HasNondeterminism());
+  r.CheckCallViaJS(1, std::nanf(""));
+  CHECK(r.HasNondeterminism());
+}
+
+TEST(NondeterminismUnopF64x2) {
+  WasmRunner<int32_t, double> r(TestExecutionTier::kLiftoffForFuzzing);
+
+  byte value = 0;
+  BUILD(r,
+        WASM_SIMD_UNOP(kExprF64x2Ceil,
+                       WASM_SIMD_F64x2_SPLAT(WASM_LOCAL_GET(value))),
+        kExprDrop, WASM_ONE);
+  CHECK(!r.HasNondeterminism());
+  r.CheckCallViaJS(1, 0.0);
+  CHECK(!r.HasNondeterminism());
+  r.CheckCallViaJS(1, std::nan(""));
+  CHECK(r.HasNondeterminism());
+}
+
 TEST(NondeterminismBinop) {
   WasmRunner<int32_t> r(TestExecutionTier::kLiftoffForFuzzing);
diff --git a/test/cctest/wasm/wasm-run-utils.h b/test/cctest/wasm/wasm-run-utils.h
index 1e6793d753..1610b89f4d 100644
--- a/test/cctest/wasm/wasm-run-utils.h
+++ b/test/cctest/wasm/wasm-run-utils.h
@@ -264,8 +264,8 @@ class TestingModuleBuilder {
 
   void set_max_steps(int n) { max_steps_ = n; }
   int* max_steps_ptr() { return &max_steps_; }
-  bool nondeterminism() { return nondeterminism_; }
-  bool* non_determinism_ptr() { return &nondeterminism_; }
+  int32_t nondeterminism() { return nondeterminism_; }
+  int32_t* non_determinism_ptr() { return &nondeterminism_; }
 
   void EnableFeature(WasmFeature feature) { enabled_features_.Add(feature); }
@@ -282,8 +282,8 @@ class TestingModuleBuilder {
   Handle<WasmInstanceObject> instance_object_;
   NativeModule* native_module_ = nullptr;
   RuntimeExceptionSupport runtime_exception_support_;
-  int max_steps_ = kMaxNumSteps;
-  bool nondeterminism_ = false;
+  int32_t max_steps_ = kMaxNumSteps;
+  int32_t nondeterminism_ = 0;
   // Data segment arrays that are normally allocated on the instance.
   std::vector<byte> data_segment_data_;
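For reference, the ia32/x64 lowering above (movaps/movapd, cmpunordps/cmpunordpd of the value against itself, pmovmskb to collapse the lane mask into a GP register, then an OR into the cell) can be modelled with SSE2 intrinsics. This is an illustrative sketch only — the patch emits these instructions directly through the Liftoff assembler, and the function names below are invented:

#include <cstdint>
#include <emmintrin.h>  // SSE2

// cmpunord sets a lane to all-ones iff the comparison is unordered, i.e.
// iff the lane is NaN; pmovmskb gathers the byte sign bits, so the value
// OR'd into *dst is non-zero exactly when some lane was NaN.
void S128SetIfNanF32(int32_t* dst, __m128 src) {
  __m128 mask = _mm_cmpunord_ps(src, src);            // cmpunordps
  *dst |= _mm_movemask_epi8(_mm_castps_si128(mask));  // pmovmskb + or
}

void S128SetIfNanF64(int32_t* dst, __m128d src) {
  __m128d mask = _mm_cmpunord_pd(src, src);           // cmpunordpd
  *dst |= _mm_movemask_epi8(_mm_castpd_si128(mask));  // pmovmskb + orl
}

NEON has no single pmovmskb-style movemask instruction, which presumably is why the arm/arm64 ports stub emit_s128_set_if_nan out with UNIMPLEMENTED() for now and the new cctests are skipped when SIMD hardware support is unavailable.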