mirror of https://github.com/microsoft/DirectXMath synced 2024-09-19 14:49:54 +00:00

Update IsNan tests for -ffinite-math-only scenarios (#184)

Chuck Walbourn 2024-02-07 13:11:54 -08:00 committed by GitHub
parent c9a31a6c49
commit fdb07bb4a0

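For context: clang sets the predefined macro __FINITE_MATH_ONLY__ to 1 when it is invoked with -ffinite-math-only (also implied by -ffast-math). In that mode the optimizer is allowed to assume that no value is NaN or infinity, so the self-comparison trick the library relies on ("NaN is the only value not equal to itself") can be constant-folded away and the NaN checks below stop firing. A minimal sketch of the pattern that breaks; the helper name is illustrative, not part of DirectXMath:

    // Sketch only: with clang and -ffinite-math-only, this comparison may be
    // folded to a constant false, so a NaN argument is never reported.
    inline bool NaiveIsNaN(float x) noexcept
    {
        return x != x; // valid under IEEE 754, invalid once NaNs are assumed away
    }

The branches added below under defined(__clang__) && defined(__FINITE_MATH_ONLY__) avoid the vector self-comparison and test each lane with isnan() instead.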

@@ -2172,14 +2172,34 @@ inline XMVECTOR XM_CALLCONV XMVectorIsNaN(FXMVECTOR V) noexcept
return Control.v;
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
XMVECTORU32 vResult = { { {
isnan(vgetq_lane_f32(V, 0)) ? 0xFFFFFFFFU : 0,
isnan(vgetq_lane_f32(V, 1)) ? 0xFFFFFFFFU : 0,
isnan(vgetq_lane_f32(V, 2)) ? 0xFFFFFFFFU : 0,
isnan(vgetq_lane_f32(V, 3)) ? 0xFFFFFFFFU : 0 } } };
return vResult.v;
#else
// Test against itself. NaN is always not equal
uint32x4_t vTempNan = vceqq_f32(V, V);
// Flip results
return vreinterpretq_f32_u32(vmvnq_u32(vTempNan));
#endif
#elif defined(_XM_SSE_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
XM_ALIGNED_DATA(16) float tmp[4];
_mm_store_ps(tmp, V);
XMVECTORU32 vResult = { { {
isnan(tmp[0]) ? 0xFFFFFFFFU : 0,
isnan(tmp[1]) ? 0xFFFFFFFFU : 0,
isnan(tmp[2]) ? 0xFFFFFFFFU : 0,
isnan(tmp[3]) ? 0xFFFFFFFFU : 0 } } };
return vResult.v;
#else
// Test against itself. NaN is always not equal
return _mm_cmpneq_ps(V, V);
#endif
#endif
}
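For reference, the mask returned by XMVectorIsNaN means the same thing on every code path: 0xFFFFFFFF in each lane that holds a NaN, 0 elsewhere. A minimal usage sketch; the helper is illustrative and not part of this commit:

    #include <DirectXMath.h>

    // Illustrative: true when any of the four lanes holds a NaN.
    inline bool XM_CALLCONV AnyLaneIsNaN(DirectX::FXMVECTOR v) noexcept
    {
        using namespace DirectX;
        XMVECTOR mask = XMVectorIsNaN(v);                // 0xFFFFFFFF per NaN lane
        return !XMVector4EqualInt(mask, XMVectorZero()); // any non-zero lane means NaN
    }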
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__INTEL_COMPILER)
@@ -6619,17 +6639,27 @@ inline bool XM_CALLCONV XMVector2IsNaN(FXMVECTOR V) noexcept
return (XMISNAN(V.vector4_f32[0]) ||
XMISNAN(V.vector4_f32[1]));
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
return isnan(vgetq_lane_f32(V, 0)) || isnan(vgetq_lane_f32(V, 1));
#else
float32x2_t VL = vget_low_f32(V);
// Test against itself. NaN is always not equal
uint32x2_t vTempNan = vceq_f32(VL, VL);
// If x or y are NaN, the mask is zero
return (vget_lane_u64(vreinterpret_u64_u32(vTempNan), 0) != 0xFFFFFFFFFFFFFFFFU);
#endif
#elif defined(_XM_SSE_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
XM_ALIGNED_DATA(16) float tmp[4];
_mm_store_ps(tmp, V);
return isnan(tmp[0]) || isnan(tmp[1]);
#else
// Test against itself. NaN is always not equal
XMVECTOR vTempNan = _mm_cmpneq_ps(V, V);
// If x or y are NaN, the mask is non-zero
return ((_mm_movemask_ps(vTempNan) & 3) != 0);
#endif
#endif
}
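A note on the new SSE fallback used in these hunks: the vector is spilled to a local float[4] so each lane can be passed to isnan(). _mm_store_ps requires a 16-byte aligned destination, which is why the buffer is declared with XM_ALIGNED_DATA(16). For comparison only (not what the commit uses), the unaligned alternative looks like this:

    #include <xmmintrin.h>

    // Illustrative: _mm_storeu_ps has no alignment requirement, at the cost of a
    // potentially slower store on older hardware.
    inline void SpillLanes(__m128 v, float out[4]) noexcept
    {
        _mm_storeu_ps(out, v);
    }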
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__INTEL_COMPILER)
@@ -9374,18 +9404,28 @@ inline bool XM_CALLCONV XMVector3IsNaN(FXMVECTOR V) noexcept
XMISNAN(V.vector4_f32[2]));
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
return isnan(vgetq_lane_f32(V, 0)) || isnan(vgetq_lane_f32(V, 1)) || isnan(vgetq_lane_f32(V, 2));
#else
// Test against itself. NaN is always not equal
uint32x4_t vTempNan = vceqq_f32(V, V);
uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vreinterpretq_u8_u32(vTempNan)), vget_high_u8(vreinterpretq_u8_u32(vTempNan)));
uint16x4x2_t vTemp2 = vzip_u16(vreinterpret_u16_u8(vTemp.val[0]), vreinterpret_u16_u8(vTemp.val[1]));
// If x or y or z are NaN, the mask is zero
return ((vget_lane_u32(vreinterpret_u32_u16(vTemp2.val[1]), 1) & 0xFFFFFFU) != 0xFFFFFFU);
#endif
#elif defined(_XM_SSE_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
XM_ALIGNED_DATA(16) float tmp[4];
_mm_store_ps(tmp, V);
return isnan(tmp[0]) || isnan(tmp[1]) || isnan(tmp[2]);
#else
// Test against itself. NaN is always not equal
XMVECTOR vTempNan = _mm_cmpneq_ps(V, V);
// If x or y or z are NaN, the mask is non-zero
return ((_mm_movemask_ps(vTempNan) & 7) != 0);
#endif
#endif
}
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__INTEL_COMPILER)
@@ -13255,18 +13295,28 @@ inline bool XM_CALLCONV XMVector4IsNaN(FXMVECTOR V) noexcept
XMISNAN(V.vector4_f32[2]) ||
XMISNAN(V.vector4_f32[3]));
#elif defined(_XM_ARM_NEON_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
return isnan(vgetq_lane_f32(V, 0)) || isnan(vgetq_lane_f32(V, 1)) || isnan(vgetq_lane_f32(V, 2)) || isnan(vgetq_lane_f32(V, 3));
#else
// Test against itself. NaN is always not equal
uint32x4_t vTempNan = vceqq_f32(V, V);
uint8x8x2_t vTemp = vzip_u8(vget_low_u8(vreinterpretq_u8_u32(vTempNan)), vget_high_u8(vreinterpretq_u8_u32(vTempNan)));
uint16x4x2_t vTemp2 = vzip_u16(vreinterpret_u16_u8(vTemp.val[0]), vreinterpret_u16_u8(vTemp.val[1]));
// If any are NaN, the mask is zero
return (vget_lane_u32(vreinterpret_u32_u16(vTemp2.val[1]), 1) != 0xFFFFFFFFU);
#endif
#elif defined(_XM_SSE_INTRINSICS_)
#if defined(__clang__) && defined(__FINITE_MATH_ONLY__)
XM_ALIGNED_DATA(16) float tmp[4];
_mm_store_ps(tmp, V);
return isnan(tmp[0]) || isnan(tmp[1]) || isnan(tmp[2]) || isnan(tmp[3]);
#else
// Test against itself. NaN is always not equal
XMVECTOR vTempNan = _mm_cmpneq_ps(V, V);
// If any are NaN, the mask is non-zero
return (_mm_movemask_ps(vTempNan) != 0);
#endif
#endif
}
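Worth noting as an alternative (not what this commit does): NaN can also be detected from the IEEE-754 bit pattern alone, which uses no floating-point comparisons and therefore cannot be folded by -ffinite-math-only. A self-contained sketch:

    #include <cstdint>
    #include <cstring>

    // Illustrative: a single-precision NaN has an all-ones exponent and a
    // non-zero mantissa.
    inline bool IsNaNBits(float x) noexcept
    {
        std::uint32_t bits;
        std::memcpy(&bits, &x, sizeof(bits));       // well-defined type pun
        return (bits & 0x7F800000u) == 0x7F800000u  // exponent all ones
            && (bits & 0x007FFFFFu) != 0u;          // mantissa non-zero
    }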
#if !defined(_XM_NO_INTRINSICS_) && defined(_MSC_VER) && !defined(__INTEL_COMPILER)