diff --git a/src/core/SkHalf.h b/src/core/SkHalf.h
index 5f5575ae1a..82d4fb414c 100644
--- a/src/core/SkHalf.h
+++ b/src/core/SkHalf.h
@@ -8,6 +8,7 @@
 #ifndef SkHalf_DEFINED
 #define SkHalf_DEFINED
 
+#include "SkCpu.h"
 #include "SkNx.h"
 #include "SkTypes.h"
 
@@ -122,3 +123,32 @@ static inline uint64_t SkFloatToHalf_01(const Sk4f& fs) {
 }
 
 #endif
+
+static inline Sk4f SkHalfToFloat_01(const uint64_t* hs) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+    if (SkCpu::Supports(SkCpu::F16C)) {
+        __m128 fs;
+    #if defined(__GNUC__) || defined(__clang__)
+        asm("vcvtph2ps %[hs], %[fs]" : [fs]"=x"(fs) : [hs]"m"(*hs));
+    #else
+        fs = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i*)hs));
+    #endif
+        return fs;
+    }
+#endif
+    return SkHalfToFloat_01(*hs);
+}
+
+static inline void SkFloatToHalf_01(const Sk4f& fs, uint64_t* hs) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+    if (SkCpu::Supports(SkCpu::F16C)) {
+    #if defined(__GNUC__) || defined(__clang__)
+        asm("vcvtps2ph $0, %[fs], %[hs]" : [hs]"=m"(*hs) : [fs]"x"(fs.fVec));
+    #else
+        _mm_storel_epi64((__m128i*)hs, _mm_cvtps_ph(fs.fVec, 0));
+    #endif
+        return;
+    }
+#endif
+    *hs = SkFloatToHalf_01(fs);
+}
diff --git a/src/core/SkXfermodeF16.cpp b/src/core/SkXfermodeF16.cpp
index dfcefa2de5..2c6873f764 100644
--- a/src/core/SkXfermodeF16.cpp
+++ b/src/core/SkXfermodeF16.cpp
@@ -134,15 +134,13 @@ static void srcover_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int
 
 static void srcover_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int count,
                       const SkAlpha aa[]) {
     for (int i = 0; i < count; ++i) {
-        const Sk4f s4 = Sk4f::Load(src[i].fVec);
-        const Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
-        const Sk4f d4 = SkHalfToFloat_01(dst[i]);
-        const Sk4f r4 = s4 + d4 * dst_scale;
+        Sk4f s = Sk4f::Load(src+i),
+             d = SkHalfToFloat_01(dst+i),
+             r = s + d*(1.0f - SkNx_shuffle<3,3,3,3>(s));
         if (aa) {
-            dst[i] = SkFloatToHalf_01(lerp_by_coverage(r4, d4, aa[i]));
-        } else {
-            dst[i] = SkFloatToHalf_01(r4);
+            r = lerp_by_coverage(r, d, aa[i]);
         }
+        SkFloatToHalf_01(r, dst+i);
     }
 }
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index 80c7f0e9ae..1fc235d99b 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -8,21 +8,13 @@
 #ifndef SkNx_sse_DEFINED
 #define SkNx_sse_DEFINED
 
+#include "SkCpu.h"
+
 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
 // If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
 #define SKNX_IS_FAST
 
-// SSE 4.1 has _mm_floor_ps to floor 4 floats. We emulate it:
-//   - roundtrip through integers via truncation
-//   - subtract 1 if that's too big (possible for negative values).
-// This restricts the domain of our inputs to a maximum somehwere around 2^31. Seems plenty big.
-static inline __m128 sse2_mm_floor_ps(__m128 v) {
-    __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
-    __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
-    return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
-}
-
 template <>
 class SkNx<2, float> {
 public:
@@ -97,7 +89,25 @@ public:
     static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
 
     SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
-    SkNx floor() const { return sse2_mm_floor_ps(fVec); }
+    SkNx floor() const {
+        if (SkCpu::Supports(SkCpu::SSE41)) {
+            __m128 r;
+        #if defined(__GNUC__) || defined(__clang__)
+            asm("roundps $0x1, %[fVec], %[r]" : [r]"=x"(r) : [fVec]"x"(fVec));
+        #else
+            r = _mm_floor_ps(fVec);
+        #endif
+            return r;
+        }
+        // Emulate _mm_floor_ps() with SSE2:
+        //   - roundtrip through integers via truncation
+        //   - subtract 1 if that's too big (possible for negative values).
+        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
+        // Seems plenty big.
+        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
+        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
+        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
+    }
 
     SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
     SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
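Note (not part of the patch): all three fast paths gate on SkCpu::Supports(...) at runtime and then reach the newer instruction (vcvtph2ps, vcvtps2ph, roundps) through inline asm on GCC/Clang, so these files can still be built with plain -msse2; on those compilers the _mm_cvtph_ps / _mm_cvtps_ph / _mm_floor_ps intrinsics in the #else branches would require building with -mf16c or -msse4.1. As a minimal standalone sketch of what such a runtime check can look like, assuming GCC/Clang's <cpuid.h> (the helper names below are hypothetical, not Skia's SkCpu API):

    // F16C is reported in CPUID leaf 1, ECX bit 29; SSE4.1 in CPUID leaf 1, ECX bit 19.
    #include <cpuid.h>   // GCC/Clang wrapper around the CPUID instruction

    static inline bool cpu_has_f16c() {
        unsigned eax, ebx, ecx, edx;
        return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && ((ecx >> 29) & 1);
    }

    static inline bool cpu_has_sse41() {
        unsigned eax, ebx, ecx, edx;
        return __get_cpuid(1, &eax, &ebx, &ecx, &edx) && ((ecx >> 19) & 1);
    }

A real implementation would query CPUID once and cache the feature bits, which is presumably what SkCpu does behind Supports().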