Unify some SkNx code

- one base case and one N=1 case instead of two each (or three with doubles)
- use SkNx_cast instead of FromBytes/toBytes
- 4-at-a-time Sk4f::ToBytes becomes a special standalone Sk4f_ToBytes
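
Concretely, the second bullet looks like this at call sites (a sketch drawn from the diffs below):

    // Before: dedicated byte<->float conversions on the float vectors.
    Sk4f f = Sk4f::FromBytes((const uint8_t*)&color);  // [0,255] bytes -> floats
    f.toBytes((uint8_t*)&back);                        // floats -> [0,255] bytes

    // After: load the bytes as an Sk4b, then one generic cast each way.
    Sk4f f = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&color));
    SkNx_cast<uint8_t>(f).store((uint8_t*)&back);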

If I did everything right, this'll be perf- and pixel-neutral.

https://gold.skia.org/search2?issue=1526523003&unt=true&query=source_type%3Dgm&master=false

BUG=skia:
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1526523003
mtklein 2015-12-14 11:25:18 -08:00, committed by Commit bot
parent 4e4155df10
commit 6f37b4a475
11 changed files with 200 additions and 261 deletions

View File

@@ -33,8 +33,8 @@ struct Sk4fBytesRoundtripBench : public Benchmark {
for (int i = 0; i < loops; i++) {
uint32_t color = lcg_rand(&seed),
back;
auto f = Sk4f::FromBytes((const uint8_t*)&color);
f.toBytes((uint8_t*)&back);
auto f = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&color));
SkNx_cast<uint8_t>(f).store((uint8_t*)&back);
junk ^= back;
}
blackhole ^= junk;
@@ -62,7 +62,7 @@ struct Sk4fGradientBench : public Benchmark {
c = b + dcdx,
d = c + dcdx;
for (size_t i = 0; i < SK_ARRAY_COUNT(fDevice); i += 4) {
Sk4f::ToBytes((uint8_t*)(fDevice+i), a, b, c, d);
Sk4f_ToBytes((uint8_t*)(fDevice+i), a, b, c, d);
a = a + dcdx4;
b = b + dcdx4;
c = c + dcdx4;
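
In outline, this bench's inner loop writes four pixels per trip: a, b, c, d hold adjacent colors, and dcdx4 presumably holds four steps' worth of dcdx so all four advance together. A sketch, with c0, dst, and n standing in for the bench's members:

    Sk4f dcdx4 = dcdx + dcdx + dcdx + dcdx;            // a four-pixel step
    Sk4f a = c0,
         b = a + dcdx,
         c = b + dcdx,
         d = c + dcdx;
    for (size_t i = 0; i < n; i += 4) {
        Sk4f_ToBytes((uint8_t*)(dst + i), a, b, c, d); // 16 bytes = 4 pixels
        a = a + dcdx4;  b = b + dcdx4;
        c = c + dcdx4;  d = d + dcdx4;
    }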

View File

@@ -9,8 +9,7 @@
#define SkNx_DEFINED
#define SKNX_NO_SIMDx // Remove the x to disable SIMD for all SkNx types.
//#define SKNX_NO_SIMD
#include "SkScalar.h"
#include "SkTypes.h"
@@ -25,7 +24,8 @@
namespace {
// The default implementations just fall back on a pair of size N/2.
// These support the union of operations we might do to ints and floats, but
// platform specializations might support fewer (e.g. no float <<, no int /).
template <int N, typename T>
class SkNx {
public:
@@ -55,14 +55,33 @@ public:
SkNx operator + (const SkNx& o) const { return SkNx(fLo + o.fLo, fHi + o.fHi); }
SkNx operator - (const SkNx& o) const { return SkNx(fLo - o.fLo, fHi - o.fHi); }
SkNx operator * (const SkNx& o) const { return SkNx(fLo * o.fLo, fHi * o.fHi); }
SkNx operator / (const SkNx& o) const { return SkNx(fLo / o.fLo, fHi / o.fHi); }
SkNx operator << (int bits) const { return SkNx(fLo << bits, fHi << bits); }
SkNx operator >> (int bits) const { return SkNx(fLo >> bits, fHi >> bits); }
SkNx operator == (const SkNx& o) const { return SkNx(fLo == o.fLo, fHi == o.fHi); }
SkNx operator != (const SkNx& o) const { return SkNx(fLo != o.fLo, fHi != o.fHi); }
SkNx operator < (const SkNx& o) const { return SkNx(fLo < o.fLo, fHi < o.fHi); }
SkNx operator > (const SkNx& o) const { return SkNx(fLo > o.fLo, fHi > o.fHi); }
SkNx operator <= (const SkNx& o) const { return SkNx(fLo <= o.fLo, fHi <= o.fHi); }
SkNx operator >= (const SkNx& o) const { return SkNx(fLo >= o.fLo, fHi >= o.fHi); }
static SkNx Min(const SkNx& a, const SkNx& b) {
return SkNx(SkNx<N/2, T>::Min(a.fLo, b.fLo), SkNx<N/2, T>::Min(a.fHi, b.fHi));
}
SkNx operator < (const SkNx& o) const { return SkNx(fLo < o.fLo, fHi < o.fHi); }
static SkNx Max(const SkNx& a, const SkNx& b) {
return SkNx(SkNx<N/2, T>::Max(a.fLo, b.fLo), SkNx<N/2, T>::Max(a.fHi, b.fHi));
}
SkNx sqrt() const { return SkNx(fLo.sqrt(), fHi.sqrt()); }
// Generally, increasing precision, increasing cost.
SkNx rsqrt0() const { return SkNx(fLo.rsqrt0(), fHi.rsqrt0()); }
SkNx rsqrt1() const { return SkNx(fLo.rsqrt1(), fHi.rsqrt1()); }
SkNx rsqrt2() const { return SkNx(fLo.rsqrt2(), fHi.rsqrt2()); }
SkNx invert() const { return SkNx(fLo. invert(), fHi. invert()); }
SkNx approxInvert() const { return SkNx(fLo.approxInvert(), fHi.approxInvert()); }
template <int k> T kth() const {
SkASSERT(0 <= k && k < N);
@@ -81,97 +100,7 @@ protected:
SkNx<N/2, T> fLo, fHi;
};
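
The design here: an N-wide vector is a pair of N/2-wide vectors, recursing until it bottoms out at the N=1 scalar case below, and platform headers specialize whichever widths they can do in real SIMD. A minimal standalone sketch of the pattern (illustrative names, not Skia's):

    template <int N, typename T>
    struct Vec {
        Vec<N/2, T> lo, hi;              // an N-vector is two N/2-vectors
        Vec operator + (const Vec& o) const { return {lo + o.lo, hi + o.hi}; }
    };

    template <typename T>
    struct Vec<1, T> {                   // the scalar case ends the recursion
        T val;
        Vec operator + (const Vec& o) const { return {val + o.val}; }
    };

    // Vec<8,float> + Vec<8,float> unrolls into eight float adds at compile time,
    // unless a platform header specializes (say) Vec<4,float> with real SIMD.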
template <int N>
class SkNx<N,float> {
public:
SkNx() {}
SkNx(float val) : fLo(val), fHi(val) {}
static SkNx Load(const float vals[N]) {
return SkNx(SkNx<N/2, float>::Load(vals), SkNx<N/2, float>::Load(vals+N/2));
}
// FromBytes() and toBytes() specializations may assume their argument is N-byte aligned.
// E.g. Sk4f::FromBytes() may assume it's reading from a 4-byte-aligned pointer.
// Converts [0,255] bytes to [0.0, 255.0] floats.
static SkNx FromBytes(const uint8_t bytes[N]) {
return SkNx(SkNx<N/2, float>::FromBytes(bytes), SkNx<N/2, float>::FromBytes(bytes+N/2));
}
SkNx(float a, float b) : fLo(a), fHi(b) { REQUIRE(N==2); }
SkNx(float a, float b, float c, float d) : fLo(a,b), fHi(c,d) { REQUIRE(N==4); }
SkNx(float a, float b, float c, float d, float e, float f, float g, float h)
: fLo(a,b,c,d)
, fHi(e,f,g,h) { REQUIRE(N==8); }
void store(float vals[N]) const {
fLo.store(vals);
fHi.store(vals+N/2);
}
// Please see note on FromBytes().
// Clamps to [0.0,255.0] floats and truncates to [0,255] bytes.
void toBytes(uint8_t bytes[N]) const {
fLo.toBytes(bytes);
fHi.toBytes(bytes+N/2);
}
// Some implementations can do this faster.
static void ToBytes(uint8_t bytes[4*N],
const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
a.toBytes(bytes+0*N);
b.toBytes(bytes+1*N);
c.toBytes(bytes+2*N);
d.toBytes(bytes+3*N);
}
SkNx operator + (const SkNx& o) const { return SkNx(fLo + o.fLo, fHi + o.fHi); }
SkNx operator - (const SkNx& o) const { return SkNx(fLo - o.fLo, fHi - o.fHi); }
SkNx operator * (const SkNx& o) const { return SkNx(fLo * o.fLo, fHi * o.fHi); }
SkNx operator / (const SkNx& o) const { return SkNx(fLo / o.fLo, fHi / o.fHi); }
SkNx operator == (const SkNx& o) const { return SkNx(fLo == o.fLo, fHi == o.fHi); }
SkNx operator != (const SkNx& o) const { return SkNx(fLo != o.fLo, fHi != o.fHi); }
SkNx operator < (const SkNx& o) const { return SkNx(fLo < o.fLo, fHi < o.fHi); }
SkNx operator > (const SkNx& o) const { return SkNx(fLo > o.fLo, fHi > o.fHi); }
SkNx operator <= (const SkNx& o) const { return SkNx(fLo <= o.fLo, fHi <= o.fHi); }
SkNx operator >= (const SkNx& o) const { return SkNx(fLo >= o.fLo, fHi >= o.fHi); }
static SkNx Min(const SkNx& l, const SkNx& r) {
return SkNx(SkNx<N/2, float>::Min(l.fLo, r.fLo), SkNx<N/2, float>::Min(l.fHi, r.fHi));
}
static SkNx Max(const SkNx& l, const SkNx& r) {
return SkNx(SkNx<N/2, float>::Max(l.fLo, r.fLo), SkNx<N/2, float>::Max(l.fHi, r.fHi));
}
SkNx sqrt() const { return SkNx(fLo. sqrt(), fHi. sqrt()); }
// Generally, increasing precision, increasing cost.
SkNx rsqrt0() const { return SkNx(fLo.rsqrt0(), fHi.rsqrt0()); }
SkNx rsqrt1() const { return SkNx(fLo.rsqrt1(), fHi.rsqrt1()); }
SkNx rsqrt2() const { return SkNx(fLo.rsqrt2(), fHi.rsqrt2()); }
SkNx invert() const { return SkNx(fLo. invert(), fHi. invert()); }
SkNx approxInvert() const { return SkNx(fLo.approxInvert(), fHi.approxInvert()); }
template <int k> float kth() const {
SkASSERT(0 <= k && k < N);
return k < N/2 ? fLo.template kth<k>() : fHi.template kth<k-N/2>();
}
bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
SkNx thenElse(const SkNx& t, const SkNx& e) const {
return SkNx(fLo.thenElse(t.fLo, e.fLo), fHi.thenElse(t.fHi, e.fHi));
}
protected:
REQUIRE(0 == (N & (N-1)));
SkNx(const SkNx<N/2, float>& lo, const SkNx<N/2, float>& hi) : fLo(lo), fHi(hi) {}
SkNx<N/2, float> fLo, fHi;
};
// Bottom out the default implementations with scalars when nothing's been specialized.
template <typename T>
class SkNx<1,T> {
public:
@@ -190,42 +119,11 @@ public:
SkNx operator + (const SkNx& o) const { return SkNx(fVal + o.fVal); }
SkNx operator - (const SkNx& o) const { return SkNx(fVal - o.fVal); }
SkNx operator * (const SkNx& o) const { return SkNx(fVal * o.fVal); }
SkNx operator / (const SkNx& o) const { return SkNx(fVal / o.fVal); }
SkNx operator << (int bits) const { return SkNx(fVal << bits); }
SkNx operator >> (int bits) const { return SkNx(fVal >> bits); }
static SkNx Min(const SkNx& a, const SkNx& b) { return SkNx(SkTMin(a.fVal, b.fVal)); }
SkNx operator <(const SkNx& o) const { return SkNx(fVal < o.fVal); }
template <int k> T kth() const {
SkASSERT(0 == k);
return fVal;
}
bool allTrue() const { return fVal; }
bool anyTrue() const { return fVal; }
SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal ? t : e; }
protected:
T fVal;
};
template <>
class SkNx<1,float> {
public:
SkNx() {}
SkNx(float val) : fVal(val) {}
static SkNx Load(const float vals[1]) { return SkNx(vals[0]); }
static SkNx FromBytes(const uint8_t bytes[1]) { return SkNx((float)bytes[0]); }
void store(float vals[1]) const { vals[0] = fVal; }
void toBytes(uint8_t bytes[1]) const { bytes[0] = (uint8_t)(SkTMin(fVal, 255.0f)); }
SkNx operator + (const SkNx& o) const { return SkNx(fVal + o.fVal); }
SkNx operator - (const SkNx& o) const { return SkNx(fVal - o.fVal); }
SkNx operator * (const SkNx& o) const { return SkNx(fVal * o.fVal); }
SkNx operator / (const SkNx& o) const { return SkNx(fVal / o.fVal); }
SkNx operator == (const SkNx& o) const { return SkNx(fVal == o.fVal); }
SkNx operator != (const SkNx& o) const { return SkNx(fVal != o.fVal); }
SkNx operator < (const SkNx& o) const { return SkNx(fVal < o.fVal); }
@@ -233,33 +131,31 @@ public:
SkNx operator <= (const SkNx& o) const { return SkNx(fVal <= o.fVal); }
SkNx operator >= (const SkNx& o) const { return SkNx(fVal >= o.fVal); }
static SkNx Min(const SkNx& l, const SkNx& r) { return SkNx(SkTMin(l.fVal, r.fVal)); }
static SkNx Max(const SkNx& l, const SkNx& r) { return SkNx(SkTMax(l.fVal, r.fVal)); }
static SkNx Min(const SkNx& a, const SkNx& b) { return SkNx(SkTMin(a.fVal, b.fVal)); }
static SkNx Max(const SkNx& a, const SkNx& b) { return SkNx(SkTMax(a.fVal, b.fVal)); }
SkNx sqrt() const { return SkNx(sqrtf(fVal)); }
SkNx rsqrt0() const { return SkNx(1.0f / sqrtf(fVal)); }
SkNx sqrt () const { return SkNx(Sqrt(fVal)); }
SkNx rsqrt0() const { return this->sqrt().invert(); }
SkNx rsqrt1() const { return this->rsqrt0(); }
SkNx rsqrt2() const { return this->rsqrt1(); }
SkNx invert() const { return SkNx(1.0f / fVal); }
SkNx invert() const { return SkNx(1) / SkNx(fVal); }
SkNx approxInvert() const { return this->invert(); }
template <int k> float kth() const {
SkASSERT(k == 0);
template <int k> T kth() const {
SkASSERT(0 == k);
return fVal;
}
bool allTrue() const { return this->pun() != 0; }
bool anyTrue() const { return this->pun() != 0; }
SkNx thenElse(const SkNx& t, const SkNx& e) const { return this->pun() ? t : e; }
bool allTrue() const { return fVal != 0; }
bool anyTrue() const { return fVal != 0; }
SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; }
protected:
uint32_t pun() const {
union { float f; uint32_t i; } pun = { fVal };
return pun.i;
}
static double Sqrt(double val) { return ::sqrt (val); }
static float Sqrt(float val) { return ::sqrtf(val); }
float fVal;
T fVal;
};
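
Two details let this one SkNx<1,T> serve float and double: comparison results are stored back into T, so truth is just fVal != 0 (replacing the old union bit-pun), and sqrt() dispatches through the static Sqrt() overloads so overload resolution picks sqrtf vs. sqrt per type. The overload trick in isolation (scalar_sqrt is an illustrative name):

    #include <cmath>

    static double Sqrt(double val) { return ::sqrt (val); }
    static float  Sqrt(float  val) { return ::sqrtf(val); }

    template <typename T>
    T scalar_sqrt(T val) { return Sqrt(val); }   // resolves per-T, no branches

    // scalar_sqrt(2.0f) calls sqrtf; scalar_sqrt(2.0) calls sqrt.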
// This default implementation can be specialized by ../opts/SkNx_foo.h
@@ -296,7 +192,7 @@ SkNx<N,D> SkNx_cast_fallback(const SkNx<N,S>& src, SkIntSequence<Ix...>) {
// This is a generic cast between two SkNx with the same number of elements N. E.g.
// Sk4b bs = ...; // Load 4 bytes.
// Sk4f fs = SkNx_cast<float>(bs); // (This will replace SkNf::FromBytes() one day.)
// Sk4f fs = SkNx_cast<float>(bs); // Cast each byte to a float.
// Sk4i is = SkNx_cast<int>(fs); // Cast each float to int.
// This can be specialized in ../opts/SkNx_foo.h if there's a better platform-specific cast.
template <typename D, typename S, int N>
@@ -306,20 +202,6 @@ SkNx<N,D> SkNx_cast(const SkNx<N,S>& src) {
} // namespace
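
The fallback presumably expands a compile-time integer sequence so each lane is cast individually through kth<i>() and the D-typed constructor. The same trick written with std::index_sequence (a self-contained sketch; SkIntSequence plays this role in the header above):

    #include <array>
    #include <utility>

    template <int N, typename T>
    struct Vec { std::array<T, N> v; };          // toy stand-in for SkNx

    template <typename D, typename S, int N, std::size_t... Ix>
    Vec<N, D> cast_fallback(const Vec<N, S>& src, std::index_sequence<Ix...>) {
        return Vec<N, D>{{ (D)src.v[Ix]... }};   // one plain cast per lane
    }

    template <typename D, typename S, int N>
    Vec<N, D> vec_cast(const Vec<N, S>& src) {
        return cast_fallback<D>(src, std::make_index_sequence<N>{});
    }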
// Include platform specific specializations if available.
#ifndef SKNX_NO_SIMD
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
#include "../opts/SkNx_avx.h"
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
#include "../opts/SkNx_sse.h"
#elif defined(SK_ARM_HAS_NEON)
#include "../opts/SkNx_neon.h"
#endif
#endif
#undef REQUIRE
typedef SkNx<2, float> Sk2f;
typedef SkNx<2, float> Sk2s;
typedef SkNx<4, float> Sk4f;
@@ -327,10 +209,34 @@ typedef SkNx<4, float> Sk4s;
typedef SkNx<8, float> Sk8f;
typedef SkNx<8, float> Sk8s;
typedef SkNx<8, uint16_t> Sk8h;
typedef SkNx< 4, uint16_t> Sk4h;
typedef SkNx< 8, uint16_t> Sk8h;
typedef SkNx<16, uint16_t> Sk16h;
typedef SkNx< 4, uint8_t> Sk4b;
typedef SkNx< 8, uint8_t> Sk8b;
typedef SkNx<16, uint8_t> Sk16b;
typedef SkNx<4, int> Sk4i;
// Include platform specific specializations if available.
#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX
#include "../opts/SkNx_avx.h"
#elif !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
#include "../opts/SkNx_sse.h"
#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
#include "../opts/SkNx_neon.h"
#else
static inline
void Sk4f_ToBytes(uint8_t p[16], const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
SkNx_cast<uint8_t>(a).store(p+ 0);
SkNx_cast<uint8_t>(b).store(p+ 4);
SkNx_cast<uint8_t>(c).store(p+ 8);
SkNx_cast<uint8_t>(d).store(p+12);
}
#endif
#undef REQUIRE
#endif//SkNx_DEFINED
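
With the typedefs now above the platform includes, the specializations can refer to Sk4f, Sk4b, etc. by name; the SSE and NEON headers supply fused Sk4f_ToBytes implementations while the #else branch provides the portable fallback above. A hypothetical use, writing four RGBA pixels in one call (channel order glossed over):

    uint32_t px[4];
    Sk4f a(255,   0,   0, 255),   // one Sk4f per pixel, channels in [0,255]
         b(  0, 255,   0, 255),
         c(  0,   0, 255, 255),
         d(255, 255, 255, 255);
    Sk4f_ToBytes((uint8_t*)px, a, b, c, d);      // 16 bytes -> px[0..3]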

View File

@@ -258,7 +258,7 @@ static Sk4f clamp_0_1(const Sk4f& x) {
static SkPMColor round(const Sk4f& x) {
SkPMColor c;
(x * Sk4f(255) + Sk4f(0.5f)).toBytes((uint8_t*)&c);
SkNx_cast<uint8_t>(x * Sk4f(255) + Sk4f(0.5f)).store((uint8_t*)&c);
return c;
}
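
round() gets round-to-nearest out of a truncating cast by pre-biasing: for x >= 0, trunc(255*x + 0.5) is 255*x rounded to nearest. A scalar model (round_channel is an illustrative name):

    #include <cstdint>

    static uint8_t round_channel(float x /* in [0,1] */) {
        return (uint8_t)(x * 255.0f + 0.5f);  // the cast truncates, the bias rounds
    }

    // round_channel(0.3f): 0.3*255 = 76.5, +0.5 -> 77; without the bias, 76.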
@@ -296,7 +296,7 @@ void SkColorMatrixFilter::filterSpan(const SkPMColor src[], int count, SkPMColor
continue;
}
Sk4f srcf = Sk4f::FromBytes((const uint8_t*)&src_c) * Sk4f(1.0f/255);
Sk4f srcf = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&src_c)) * Sk4f(1.0f/255);
if (0xFF != SkGetPackedA32(src_c)) {
srcf = unpremul(srcf);

View File

@@ -150,7 +150,7 @@ SkLinearGradient::LinearGradientContext::LinearGradientContext(
const Sk4f scale(1, 1, 1, paintAlpha);
for (int i = 0; i < count; ++i) {
uint32_t c = SkSwizzle_Color_to_PMColor(shader.fOrigColors[i]);
rec[i].fColor = Sk4f::FromBytes((const uint8_t*)&c) * scale;
rec[i].fColor = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&c)) * scale;
if (i > 0) {
SkASSERT(rec[i - 1].fPos <= rec[i].fPos);
}
@@ -162,7 +162,7 @@ SkLinearGradient::LinearGradientContext::LinearGradientContext(
for (int i = 0; i < count; ++i) {
SkPMColor pmc = SkPreMultiplyColor(shader.fOrigColors[i]);
pmc = SkAlphaMulQ(pmc, alphaScale);
rec[i].fColor = Sk4f::FromBytes((const uint8_t*)&pmc);
rec[i].fColor = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&pmc));
if (i > 0) {
SkASSERT(rec[i - 1].fPos <= rec[i].fPos);
}
@@ -699,7 +699,7 @@ find_backward(const SkLinearGradient::LinearGradientContext::Rec rec[], float ti
template <bool apply_alpha> SkPMColor trunc_from_255(const Sk4f& x) {
SkPMColor c;
x.toBytes((uint8_t*)&c);
SkNx_cast<uint8_t>(x).store((uint8_t*)&c);
if (apply_alpha) {
c = SkPreMultiplyARGB(SkGetPackedA32(c), SkGetPackedR32(c),
SkGetPackedG32(c), SkGetPackedB32(c));
@@ -751,7 +751,7 @@ template <bool apply_alpha> void ramp(SkPMColor dstC[], int n, const Sk4f& c, co
Sk4f cd3 = cd1 + dc2;
while (n >= 4) {
if (!apply_alpha) {
Sk4f::ToBytes((uint8_t*)dstC, cd0, cd1, cd2, cd3);
Sk4f_ToBytes((uint8_t*)dstC, cd0, cd1, cd2, cd3);
dstC += 4;
} else {
*dstC++ = trunc_from_255<apply_alpha>(cd0);

View File

@@ -307,7 +307,7 @@ void shadeSpan_radial_clamp2(SkScalar sfx, SkScalar sdx, SkScalar sfy, SkScalar
dR = dR + ddR;
uint8_t fi[4];
dist.toBytes(fi);
SkNx_cast<uint8_t>(dist).store(fi);
for (int i = 0; i < 4; i++) {
*dstC++ = cache[toggle + fi[i]];
@@ -319,7 +319,7 @@ void shadeSpan_radial_clamp2(SkScalar sfx, SkScalar sdx, SkScalar sfy, SkScalar
Sk4f dist = Sk4f::Min(fast_sqrt(R), max);
uint8_t fi[4];
dist.toBytes(fi);
SkNx_cast<uint8_t>(dist).store(fi);
for (int i = 0; i < count; i++) {
*dstC++ = cache[toggle + fi[i]];
toggle = next_dither_toggle(toggle);

View File

@@ -59,10 +59,10 @@ void color_cube_filter_span(const SkPMColor src[],
const SkColor lutColor10 = colorCube[ix + i10];
const SkColor lutColor11 = colorCube[ix + i11];
Sk4f sum = Sk4f::FromBytes((const uint8_t*)&lutColor00) * g0b0;
sum = sum + Sk4f::FromBytes((const uint8_t*)&lutColor01) * g0b1;
sum = sum + Sk4f::FromBytes((const uint8_t*)&lutColor10) * g1b0;
sum = sum + Sk4f::FromBytes((const uint8_t*)&lutColor11) * g1b1;
Sk4f sum = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor00)) * g0b0;
sum = sum + SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor01)) * g0b1;
sum = sum + SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor10)) * g1b0;
sum = sum + SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor11)) * g1b1;
color = color + sum * Sk4f((float)colorToFactors[x][r]);
}
if (a != 255) {
@@ -74,7 +74,7 @@ void color_cube_filter_span(const SkPMColor src[],
color = SkNx_shuffle<2,1,0,3>(color);
#endif
uint8_t* dstBytes = (uint8_t*)(dst+i);
color.toBytes(dstBytes);
SkNx_cast<uint8_t>(color).store(dstBytes);
dstBytes[SK_A32_SHIFT/8] = a;
}
}
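
Each output is a four-tap blend of neighboring LUT colors; loading each packed color as an Sk4f lets one multiply weight all four channels at once. Per channel, with g0b0 etc. presumably the bilinear lattice weights (g0*b0 and so on, summing to 1):

    // Scalar model of the blend in the hunk above:
    float blend(float c00, float c01, float c10, float c11,
                float g0, float g1, float b0, float b1) {
        return c00*g0*b0 + c01*g0*b1 + c10*g1*b0 + c11*g1*b1;
    }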

View File

@@ -26,27 +26,10 @@ public:
SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
static SkNx Load(const float vals[8]) { return _mm256_loadu_ps(vals); }
static SkNx FromBytes(const uint8_t bytes[8]) {
__m128i fix8 = _mm_loadl_epi64((const __m128i*)bytes),
fix16 = _mm_unpacklo_epi8 (fix8 , _mm_setzero_si128()),
lo32 = _mm_unpacklo_epi16(fix16, _mm_setzero_si128()),
hi32 = _mm_unpackhi_epi16(fix16, _mm_setzero_si128());
__m256i fix32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo32), hi32, 1);
return _mm256_cvtepi32_ps(fix32);
}
SkNx(float a, float b, float c, float d,
float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}
void store(float vals[8]) const { _mm256_storeu_ps(vals, fVec); }
void toBytes(uint8_t bytes[8]) const {
__m256i fix32 = _mm256_cvttps_epi32(fVec);
__m128i lo32 = _mm256_extractf128_si256(fix32, 0),
hi32 = _mm256_extractf128_si256(fix32, 1),
fix16 = _mm_packus_epi32(lo32, hi32),
fix8 = _mm_packus_epi16(fix16, fix16);
_mm_storel_epi64((__m128i*)bytes, fix8);
}
SkNx operator + (const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
SkNx operator - (const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
@@ -87,6 +70,25 @@ public:
__m256 fVec;
};
template<> inline Sk8b SkNx_cast<uint8_t, float, 8>(const Sk8f& src) {
__m256i _32 = _mm256_cvttps_epi32(src.fVec);
__m128i lo = _mm256_extractf128_si256(_32, 0),
hi = _mm256_extractf128_si256(_32, 1),
_16 = _mm_packus_epi32(lo, hi);
return _mm_packus_epi16(_16, _16);
}
template<> inline Sk8f SkNx_cast<float, uint8_t, 8>(const Sk8b& src) {
/* TODO lo = _mm_cvtepu8_epi32(src.fVec),
* hi = _mm_cvtepu8_epi32(_mm_srli_si128(src.fVec, 4))
*/
__m128i _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
lo = _mm_unpacklo_epi16(_16, _mm_setzero_si128()),
hi = _mm_unpackhi_epi16(_16, _mm_setzero_si128());
__m256i _32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
return _mm256_cvtepi32_ps(_32);
}
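
Both casts split into 128-bit halves because AVX1 lacks 256-bit integer pack/unpack ops. Per lane, the float->byte direction is truncate-then-saturate: cvttps truncates toward zero and the packus steps clamp to [0,255]. A scalar model of one lane, assuming the value fits in int32 (my reading of the intent, not Skia code):

    #include <cstdint>

    static uint8_t cast_lane(float f) {
        int32_t i = (int32_t)f;    // _mm256_cvttps_epi32: truncate toward zero
        if (i < 0)   i = 0;        // unsigned saturation from the packus steps
        if (i > 255) i = 255;
        return (uint8_t)i;
    }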
} // namespace
#endif//SkNx_avx_DEFINED

View File

@@ -150,31 +150,9 @@ public:
SkNx() {}
SkNx(float val) : fVec(vdupq_n_f32(val)) {}
static SkNx Load(const float vals[4]) { return vld1q_f32(vals); }
static SkNx FromBytes(const uint8_t vals[4]) {
uint8x8_t fix8 = (uint8x8_t)vld1_dup_u32((const uint32_t*)vals);
uint16x8_t fix8_16 = vmovl_u8(fix8);
uint32x4_t fix8_32 = vmovl_u16(vget_low_u16(fix8_16));
return SkNx(vcvtq_f32_u32(fix8_32));
}
SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
void store(float vals[4]) const { vst1q_f32(vals, fVec); }
void toBytes(uint8_t bytes[4]) const {
uint32x4_t fix8_32 = vcvtq_u32_f32(fVec);
uint16x4_t fix8_16 = vqmovn_u32(fix8_32);
uint8x8_t fix8 = vqmovn_u16(vcombine_u16(fix8_16, vdup_n_u16(0)));
vst1_lane_u32((uint32_t*)bytes, (uint32x2_t)fix8, 0);
}
static void ToBytes(uint8_t bytes[16],
const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
vst1q_u8(bytes, vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
(uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
(uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
}
SkNx approxInvert() const {
float32x4_t est0 = vrecpeq_f32(fVec),
est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
@@ -287,6 +265,24 @@ public:
uint16x8_t fVec;
};
template <>
class SkNx<4, uint8_t> {
public:
SkNx(const uint8x8_t& vec) : fVec(vec) {}
SkNx() {}
static SkNx Load(const uint8_t vals[4]) {
return (uint8x8_t)vld1_dup_u32((const uint32_t*)vals);
}
void store(uint8_t vals[4]) const {
return vst1_lane_u32((uint32_t*)vals, (uint32x2_t)fVec, 0);
}
// TODO as needed
uint8x8_t fVec;
};
template <>
class SkNx<16, uint8_t> {
public:
@@ -329,11 +325,30 @@ public:
#undef SHIFT16
#undef SHIFT8
template<>
inline SkNx<4, int> SkNx_cast<int, float, 4>(const SkNx<4, float>& src) {
template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
return vcvtq_s32_f32(src.fVec);
}
template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
uint16x4_t _16 = vqmovn_u32(_32);
return vqmovn_u16(vcombine_u16(_16, _16));
}
template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
uint16x8_t _16 = vmovl_u8 (src.fVec) ;
uint32x4_t _32 = vmovl_u16(vget_low_u16(_16));
return vcvtq_f32_u32(_32);
}
static inline void Sk4f_ToBytes(uint8_t bytes[16],
const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
vst1q_u8(bytes, vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
(uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
(uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
}
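
Sk4f_ToBytes here leans on two rounds of vuzpq_u8: after vcvtq_u32_f32, each value sits in byte 0 of its 32-bit lane, and unzipping even-indexed bytes twice gathers all sixteen value bytes for one vst1q_u8. A sketch of the byte motion plus a scalar model (valid when lanes are already in [0,255], since this path truncates per byte rather than saturating):

    // a as u32 lanes, little-endian:     a0 0 0 0 | a1 0 0 0 | a2 0 0 0 | a3 0 0 0
    // vuzpq_u8(a,b).val[0], even bytes:  a0 0 a1 0 a2 0 a3 0 b0 0 b1 0 b2 0 b3 0
    // vuzpq_u8(ab,cd).val[0] once more:  a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 d0 d1 d2 d3

    #include <cstdint>

    static void to_bytes_model(uint8_t out[16], const uint32_t lanes[4][4]) {
        for (int k = 0; k < 16; k++)
            out[k] = (uint8_t)lanes[k/4][k%4];   // keep the low byte of each lane
    }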
} // namespace
#endif//SkNx_neon_DEFINED

View File

@@ -111,37 +111,9 @@ public:
SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
static SkNx Load(const float vals[4]) { return _mm_loadu_ps(vals); }
static SkNx FromBytes(const uint8_t bytes[4]) {
__m128i fix8 = _mm_cvtsi32_si128(*(const int*)bytes);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const char _ = ~0; // Zero these bytes.
__m128i fix8_32 = _mm_shuffle_epi8(fix8, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
__m128i fix8_16 = _mm_unpacklo_epi8 (fix8, _mm_setzero_si128()),
fix8_32 = _mm_unpacklo_epi16(fix8_16, _mm_setzero_si128());
#endif
return SkNx(_mm_cvtepi32_ps(fix8_32));
// TODO: use _mm_cvtepu8_epi32 w/SSE4.1?
}
SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
void store(float vals[4]) const { _mm_storeu_ps(vals, fVec); }
void toBytes(uint8_t bytes[4]) const {
__m128i fix8_32 = _mm_cvttps_epi32(fVec),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
*(int*)bytes = _mm_cvtsi128_si32(fix8);
}
static void ToBytes(uint8_t bytes[16],
const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
_mm_storeu_si128((__m128i*)bytes,
_mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
_mm_cvttps_epi32(b.fVec)),
_mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
_mm_cvttps_epi32(d.fVec))));
}
SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
@@ -252,6 +224,34 @@ public:
__m128i fVec;
};
template <>
class SkNx<4, uint8_t> {
public:
SkNx(const __m128i& vec) : fVec(vec) {}
SkNx() {}
static SkNx Load(const uint8_t vals[4]) { return _mm_cvtsi32_si128(*(const int*)vals); }
void store(uint8_t vals[4]) const { *(int*)vals = _mm_cvtsi128_si32(fVec); }
// TODO as needed
__m128i fVec;
};
template <>
class SkNx<8, uint8_t> {
public:
SkNx(const __m128i& vec) : fVec(vec) {}
SkNx() {}
static SkNx Load(const uint8_t vals[8]) { return _mm_loadl_epi64((const __m128i*)vals); }
void store(uint8_t vals[8]) const { _mm_storel_epi64((__m128i*)vals, fVec); }
// TODO as needed
__m128i fVec;
};
template <>
class SkNx<16, uint8_t> {
public:
@@ -296,11 +296,42 @@ public:
};
template<>
inline SkNx<4, int> SkNx_cast<int, float, 4>(const SkNx<4, float>& src) {
template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
return _mm_cvttps_epi32(src.fVec);
}
template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
auto _16 = _mm_packus_epi16(_32, _32);
return _mm_packus_epi16(_16, _16);
#endif
}
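
On SSSE3 a single pshufb replaces the two packs: the mask gathers byte 0 of each 32-bit lane (source bytes 0, 4, 8, 12), and any mask byte with its high bit set (the ~0 slots) writes a zero. A scalar model of _mm_shuffle_epi8's semantics (illustrative names):

    #include <cstdint>

    static void pshufb_model(uint8_t out[16], const uint8_t src[16],
                             const int8_t mask[16]) {
        for (int i = 0; i < 16; i++)
            out[i] = (mask[i] < 0) ? 0 : src[mask[i] & 0x0F];
    }

    // With mask {0,4,8,12, ~0,...}: out[0..3] are the four truncated lane values
    // and out[4..15] are zero.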
template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
_32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
return _mm_cvtepi32_ps(_32);
}
static inline void Sk4f_ToBytes(uint8_t bytes[16],
const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
_mm_storeu_si128((__m128i*)bytes,
_mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
_mm_cvttps_epi32(b.fVec)),
_mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
_mm_cvttps_epi32(d.fVec))));
}
} // namespace
#endif//SkNx_sse_DEFINED

View File

@@ -265,11 +265,11 @@ public:
private:
static Sk4f Load(SkPMColor c) {
return Sk4f::FromBytes((uint8_t*)&c) * Sk4f(1.0f/255);
return SkNx_cast<float>(Sk4b::Load((uint8_t*)&c)) * Sk4f(1.0f/255);
}
static SkPMColor Round(const Sk4f& f) {
SkPMColor c;
(f * Sk4f(255) + Sk4f(0.5f)).toBytes((uint8_t*)&c);
SkNx_cast<uint8_t>(f * Sk4f(255) + Sk4f(0.5f)).store((uint8_t*)&c);
return c;
}
inline SkPMColor xfer32(SkPMColor dst, SkPMColor src) const {

View File

@@ -207,21 +207,6 @@ DEF_TEST(Sk4px_widening, r) {
REPORTER_ASSERT(r, 0 == memcmp(&wideLoHi, &wideLoHiAlt, sizeof(wideLoHi)));
}
DEF_TEST(Sk4f_toBytes, r) {
uint8_t bytes[4];
// toBytes truncates, not rounds.
Sk4f(0.7f).toBytes(bytes);
REPORTER_ASSERT(r, bytes[0] == 0);
// Clamping edge cases.
Sk4f(-2.0f, -0.7f, 255.9f, 256.0f).toBytes(bytes);
REPORTER_ASSERT(r, bytes[0] == 0);
REPORTER_ASSERT(r, bytes[1] == 0);
REPORTER_ASSERT(r, bytes[2] == 255);
REPORTER_ASSERT(r, bytes[3] == 255);
}
DEF_TEST(SkNx_cast, r) {
Sk4f fs(-1.7f, -1.4f, 0.5f, 1.9f);
Sk4i is = SkNx_cast<int>(fs);