skcpu: sse4.1 floor, f16c f16<->f32
- floor with roundps is about 4.5x faster when available
- f16 srcover_n is similar to but a little faster than the version in
  https://codereview.chromium.org/1884683002.

This new one fuses the dst load/stores into the f16<->f32 conversions:

    +0x180  movups     (%r15), %xmm1
    +0x184  vcvtph2ps  (%rbx), %xmm2
    +0x189  movaps     %xmm1, %xmm3
    +0x18c  shufps     $255, %xmm3, %xmm3
    +0x190  movaps     %xmm0, %xmm4
    +0x193  subps      %xmm3, %xmm4
    +0x196  mulps      %xmm2, %xmm4
    +0x199  addps      %xmm1, %xmm4
    +0x19c  vcvtps2ph  $0, %xmm4, (%rbx)
    +0x1a2  addq       $16, %r15
    +0x1a6  addq       $8, %rbx
    +0x1aa  decl       %r14d
    +0x1ad  jne        +0x180

If we decide to land this, it'd be a good idea to convert most or all users of
SkFloatToHalf_01 and SkHalfToFloat_01 over to the pointer-based versions.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1891513002
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Committed: https://skia.googlesource.com/skia/+/cbe3c1af987d622ea67ef560d855b41bb14a0ce9
Committed: https://skia.googlesource.com/skia/+/3faf74b8364491ca806f523fbb1d8a97be592663

Review URL: https://codereview.chromium.org/1891513002
Commit 244a65350e (parent 51dece33e8)
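For concreteness, here is what that call-site migration would look like (a hypothetical sketch; blend() stands in for whatever math sits between the conversions, and the pattern mirrors the srcover_n hunk below). The pointer-based overloads let the dst load/store fuse into vcvtph2ps/vcvtps2ph on F16C machines:

    // Before: value-based conversions, separate load and store.
    Sk4f d = SkHalfToFloat_01(dst[i]);
    Sk4f r = blend(s, d);
    dst[i] = SkFloatToHalf_01(r);

    // After: pointer-based conversions read and write memory directly.
    Sk4f d = SkHalfToFloat_01(dst + i);
    Sk4f r = blend(s, d);
    SkFloatToHalf_01(r, dst + i);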
SkHalf.h
@@ -8,6 +8,7 @@
 #ifndef SkHalf_DEFINED
 #define SkHalf_DEFINED
 
+#include "SkCpu.h"
 #include "SkNx.h"
 #include "SkTypes.h"
 
@@ -122,3 +123,32 @@ static inline uint64_t SkFloatToHalf_01(const Sk4f& fs) {
 }
 
+static inline Sk4f SkHalfToFloat_01(const uint64_t* hs) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+    if (SkCpu::Supports(SkCpu::F16C)) {
+        __m128 fs;
+    #if defined(__GNUC__) || defined(__clang__)
+        asm("vcvtph2ps %[hs], %[fs]" : [fs]"=x"(fs) : [hs]"m"(*hs));
+    #else
+        fs = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i*)hs));
+    #endif
+        return fs;
+    }
+#endif
+    return SkHalfToFloat_01(*hs);
+}
+
+static inline void SkFloatToHalf_01(const Sk4f& fs, uint64_t* hs) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+    if (SkCpu::Supports(SkCpu::F16C)) {
+    #if defined(__GNUC__) || defined(__clang__)
+        asm("vcvtps2ph $0, %[fs], %[hs]" : [hs]"=m"(*hs) : [fs]"x"(fs.fVec));
+    #else
+        _mm_storel_epi64((__m128i*)hs, _mm_cvtps_ph(fs.fVec, 0));
+    #endif
+        return;
+    }
+#endif
+    *hs = SkFloatToHalf_01(fs);
+}
+
 #endif
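Why inline asm instead of _mm_cvtph_ps/_mm_cvtps_ph on GCC and Clang? Those intrinsics require building the translation unit with -mf16c, which would also license the compiler to emit F16C instructions on paths that never checked SkCpu::Supports(). The asm emits just the one instruction behind the runtime check. Below is a standalone sketch of the same dispatch pattern, not Skia code; the names has_f16c and half_to_float4 are hypothetical, and the scalar fallback handles normals and zeros only:

    #include <emmintrin.h>
    #include <cpuid.h>    // GCC/Clang
    #include <stdint.h>
    #include <string.h>

    static bool has_f16c() {
        unsigned a, b, c, d;
        if (!__get_cpuid(1, &a, &b, &c, &d)) { return false; }
        return c & (1u << 29);              // CPUID.1:ECX bit 29 is F16C.
    }

    static float half_to_float(uint16_t h) {  // scalar fallback (normals/zero only)
        uint32_t sign = (uint32_t)(h >> 15) << 31,
                 em   = h & 0x7fff,
                 bits = em == 0 ? sign                                    // +/-0
                                : sign | ((em + ((127 - 15) << 10)) << 13);  // rebias exponent
        float f;
        memcpy(&f, &bits, sizeof f);
        return f;
    }

    static __m128 half_to_float4(const uint64_t* hs) {
        if (has_f16c()) {
            __m128 fs;
            // One instruction; the rest of the file compiles without -mf16c.
            asm("vcvtph2ps %[hs], %[fs]" : [fs]"=x"(fs) : [hs]"m"(*hs));
            return fs;
        }
        uint16_t h[4];
        memcpy(h, hs, sizeof h);
        return _mm_setr_ps(half_to_float(h[0]), half_to_float(h[1]),
                           half_to_float(h[2]), half_to_float(h[3]));
    }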
@@ -134,15 +134,13 @@ static void srcover_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int
 static void srcover_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int count,
                       const SkAlpha aa[]) {
     for (int i = 0; i < count; ++i) {
-        const Sk4f s4 = Sk4f::Load(src[i].fVec);
-        const Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
-        const Sk4f d4 = SkHalfToFloat_01(dst[i]);
-        const Sk4f r4 = s4 + d4 * dst_scale;
+        Sk4f s = Sk4f::Load(src+i),
+             d = SkHalfToFloat_01(dst+i),
+             r = s + d*(1.0f - SkNx_shuffle<3,3,3,3>(s));
         if (aa) {
-            dst[i] = SkFloatToHalf_01(lerp_by_coverage(r4, d4, aa[i]));
-        } else {
-            dst[i] = SkFloatToHalf_01(r4);
+            r = lerp_by_coverage(r, d, aa[i]);
         }
+        SkFloatToHalf_01(r, dst+i);
     }
 }
 
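For reference, the per-pixel math the new loop performs, written out as a scalar sketch (illustrative only; it assumes lerp_by_coverage blends the result toward dst by aa/255, which matches how the loop uses it):

    #include <stdint.h>

    // src and dst are premultiplied RGBA; src[3] is alpha.
    static void srcover_px(float dst[4], const float src[4], uint8_t aa) {
        float r[4];
        for (int c = 0; c < 4; ++c) {
            r[c] = src[c] + dst[c] * (1.0f - src[3]);     // classic src-over
        }
        float cov = aa * (1 / 255.0f);
        for (int c = 0; c < 4; ++c) {
            dst[c] = r[c] * cov + dst[c] * (1.0f - cov);  // lerp by coverage
        }
    }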
SkNx_sse.h
@@ -8,21 +8,13 @@
 #ifndef SkNx_sse_DEFINED
 #define SkNx_sse_DEFINED
 
+#include "SkCpu.h"
+
 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
+// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
 
 #define SKNX_IS_FAST
 
-// SSE 4.1 has _mm_floor_ps to floor 4 floats. We emulate it:
-//   - roundtrip through integers via truncation
-//   - subtract 1 if that's too big (possible for negative values).
-// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
-static inline __m128 sse2_mm_floor_ps(__m128 v) {
-    __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
-    __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
-    return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
-}
-
 template <>
 class SkNx<2, float> {
 public:
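The ODR warning above is worth unpacking. Suppose a header picked the implementation with the preprocessor in a plain inline function (a hypothetical sketch of what NOT to do; sse2_fallback is made up):

    // hypothetical header -- risky: inline gives the function external linkage,
    // and the linker assumes every TU's definition is identical.
    inline __m128 fast_floor(__m128 v) {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(v);      // a TU built with -msse4.1 defines it this way
    #else
        return sse2_fallback(v);     // a TU built with plain SSE2 defines it this way
    #endif
    }

If two translation units compiled with different SSE flags include that header, the program contains two unequal definitions of fast_floor, which is an ODR violation; the linker keeps an arbitrary one, so an SSE2-only code path can silently end up running SSE4.1 instructions. Making the function static inline gives each TU its own internal copy, and the SkCpu::Supports() check in the diff below chooses the instruction at runtime instead of at compile time.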
@@ -97,7 +89,25 @@ public:
     static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
 
     SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
-    SkNx floor() const { return sse2_mm_floor_ps(fVec); }
+    SkNx floor() const {
+        if (SkCpu::Supports(SkCpu::SSE41)) {
+            __m128 r;
+        #if defined(__GNUC__) || defined(__clang__)
+            asm("roundps $0x1, %[fVec], %[r]" : [r]"=x"(r) : [fVec]"x"(fVec));
+        #else
+            r = _mm_floor_ps(fVec);
+        #endif
+            return r;
+        }
+        // Emulate _mm_floor_ps() with SSE2:
+        //   - roundtrip through integers via truncation
+        //   - subtract 1 if that's too big (possible for negative values).
+        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
+        // Seems plenty big.
+        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
+        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
+        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
+    }
 
     SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
     SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
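A quick standalone check of the SSE2 emulation, and why the "subtract 1" step exists: truncation rounds toward zero, so for negative non-integers the roundtrip overshoots. For example trunc(-1.5) = -1, and -1 > -1.5, so subtracting 1 lands on the true floor, -2. This sketch is not Skia code:

    #include <emmintrin.h>
    #include <stdio.h>

    static __m128 sse2_floor(__m128 v) {
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));  // truncate toward 0
        __m128 too_big   = _mm_cmpgt_ps(roundtrip, v);            // all-1s where trunc overshot
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
    }

    int main(void) {
        float out[4];
        _mm_storeu_ps(out, sse2_floor(_mm_setr_ps(-1.5f, -0.5f, 0.5f, 2.0f)));
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // prints: -2 -1 0 2
        return 0;
    }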