Revert of SkNx refactoring (patchset #4 id:60001 of https://codereview.chromium.org/1690633003/ )
Reason for revert:
Precautionary revert for chromium:586487

Original issue's description:
> SkNx refactoring
>
> - add back Sk4i typedef
> - define SSE casts in terms of Sk4i
>   * uint8 <-> float becomes uint8 <-> int <-> float
>   * uint16 <-> float becomes uint16 <-> int <-> float
>
> This has the nice side effect of specializing uint8 <-> int
> and uint16 <-> int, which are useful in their own right.
>
> There are many cast specializations now, some of which call each other.
> I have tried to arrange them in some sort of sensible order, subject to
> the constraint that those called must precede those who call.
>
> BUG=skia:
> GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1690633003
> CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot
>
> Committed: https://skia.googlesource.com/skia/+/c1eb311f4e98934476f1b2ad5d6de772cf140d60

TBR=herb@google.com,mtklein@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=chromium:586487

Review URL: https://codereview.chromium.org/1696903002
parent: fed90d4712
commit: 97120a7ed1
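The composition the description calls out (uint8 <-> float via int) can be illustrated outside of Skia's SkNx_cast template machinery. Here is a minimal standalone sketch of the idea in raw SSE2 intrinsics; the helper names are hypothetical, not Skia's actual code:

    #include <emmintrin.h>  // SSE2

    // uint8 -> int: zero-extend four bytes into four 32-bit lanes.
    static inline __m128i cast_u8_to_i32(__m128i bytes) {
        __m128i zero = _mm_setzero_si128();
        __m128i u16  = _mm_unpacklo_epi8(bytes, zero);   // uint8  -> uint16
        return _mm_unpacklo_epi16(u16, zero);            // uint16 -> int32
    }

    // int -> float: a single SSE2 instruction.
    static inline __m128 cast_i32_to_f32(__m128i ints) {
        return _mm_cvtepi32_ps(ints);
    }

    // uint8 -> float, defined as uint8 -> int -> float, mirroring the
    // two-step composition described above.
    static inline __m128 cast_u8_to_f32(__m128i bytes) {
        return cast_i32_to_f32(cast_u8_to_i32(bytes));
    }

Chaining through the int specializations is what makes each leg (uint8 <-> int, int <-> float) reusable on its own, which is the side effect the description mentions.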
src/core/SkNx.h:

@@ -199,8 +199,6 @@ typedef SkNx<16, uint8_t> Sk16b;
 typedef SkNx<4, uint16_t> Sk4h;
 typedef SkNx<16, uint16_t> Sk16h;
 
-typedef SkNx<4, int> Sk4i;
-
 // Include platform specific specializations if available.
 #if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
     #include "../opts/SkNx_sse.h"
src/opts/SkNx_sse.h:

@@ -295,37 +295,8 @@ public:
 };
 
 
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
-    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
-}
-template<> /*static*/ inline Sk4i SkNx_cast< int, uint8_t>(const Sk4b& src) {
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-    const int _ = ~0;
-    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
-#else
-    return _mm_unpacklo_epi16(SkNx_cast<uint16_t>(src).fVec, _mm_setzero_si128());
-#endif
-}
-
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
-    return _mm_packus_epi16(src.fVec, src.fVec);
-}
-template<> /*static*/ inline Sk4i SkNx_cast< int, uint16_t>(const Sk4h& src) {
-    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
-}
-
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-    const int _ = ~0;
-    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
-#else
-    // We're on our way to 8-bit anyway, so we don't care that _mm_packs_epi32 clamps to int16_t.
-    Sk4h _16 = _mm_packs_epi32(src.fVec, src.fVec);
-    return SkNx_cast<uint8_t>(_16);
-#endif
-}
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
-    auto _32 = src.fVec;
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+    auto _32 = _mm_cvttps_epi32(src.fVec);
     // Ideally we'd use _mm_packus_epi32 here.  But that's SSE4.1+.
 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
     // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
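An editorial aside on the SSSE3 paths above (not part of the commit): the `const int _ = ~0;` idiom works because `_mm_shuffle_epi8` writes a zero byte wherever the control-mask byte has its high bit set, so a single shuffle can permute and zero-fill at once. A small self-contained demonstration (compile with -mssse3):

    #include <tmmintrin.h>  // SSSE3; pulls in the SSE2 intrinsics too
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        // Four bytes in the low lanes: 10, 20, 30, 40.
        __m128i src = _mm_setr_epi8(10, 20, 30, 40, 0,0,0,0, 0,0,0,0, 0,0,0,0);
        const char _ = ~0;  // 0xFF: high bit set => _mm_shuffle_epi8 emits 0x00
        // Spread byte n into lane n's low byte, zero-filling the rest:
        // exactly the zero-extension trick in the diff above.
        __m128i ints = _mm_shuffle_epi8(src,
            _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
        int32_t out[4];
        _mm_storeu_si128((__m128i*)out, ints);
        printf("%d %d %d %d\n", (int)out[0], (int)out[1], (int)out[2], (int)out[3]);
        // Prints: 10 20 30 40
        return 0;
    }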
@@ -338,34 +309,48 @@ template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
 #endif
 }
 
-template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
-    return _mm_cvtepi32_ps(src.fVec);
-}
-template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
-    return SkNx_cast<float>(SkNx_cast<int>(src));
-}
-template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
-    return SkNx_cast<float>(SkNx_cast<int>(src));
-}
-
-template<> /*static*/ inline Sk4i SkNx_cast< int, float>(const Sk4f& src) {
-    return _mm_cvttps_epi32(src.fVec);
-}
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
-    return SkNx_cast<uint16_t>(SkNx_cast<int>(src));
-}
-template<> /*static*/ inline Sk4b SkNx_cast< uint8_t, float>(const Sk4f& src) {
-    return SkNx_cast<uint8_t>(SkNx_cast<int>(src));
-}
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+    auto _32 = _mm_cvttps_epi32(src.fVec);
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+    const int _ = ~0;
+    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
+#else
+    auto _16 = _mm_packus_epi16(_32, _32);
+    return _mm_packus_epi16(_16, _16);
+#endif
+}
+
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+    const int _ = ~0;
+    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
+#else
+    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
+         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
+#endif
+    return _mm_cvtepi32_ps(_32);
+}
+
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+    return _mm_cvtepi32_ps(_32);
+}
 
 static inline void Sk4f_ToBytes(uint8_t bytes[16],
                                 const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
-    // We're on our way to 8-bit anyway, so we don't care that _mm_packs_epi32 clamps to int16_t.
     _mm_storeu_si128((__m128i*)bytes,
-                     _mm_packus_epi16(_mm_packs_epi32(_mm_cvttps_epi32(a.fVec),
-                                                      _mm_cvttps_epi32(b.fVec)),
-                                      _mm_packs_epi32(_mm_cvttps_epi32(c.fVec),
-                                                      _mm_cvttps_epi32(d.fVec))));
+                     _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
+                                                       _mm_cvttps_epi32(b.fVec)),
+                                      _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
+                                                       _mm_cvttps_epi32(d.fVec))));
 }
 
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+}
+
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+    return _mm_packus_epi16(src.fVec, src.fVec);
+}
+
 #endif//SkNx_sse_DEFINED
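An editorial footnote on the Sk4f_ToBytes change (not part of the commit): `_mm_packs_epi32` narrows 32-bit lanes to 16-bit with signed saturation, while `_mm_packus_epi16` narrows 16-bit lanes to 8-bit with unsigned saturation. The restored version applies `_mm_packus_epi16` directly to 32-bit data; for the expected byte-range values, each 32-bit lane's two 16-bit halves pack to the byte pair (value, 0), which is bitwise identical to a 16-bit lane holding the value, so a second pack finishes the job. A minimal sketch, assuming inputs already truncated to int32 in [0, 255]:

    #include <emmintrin.h>  // SSE2
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        // Stand-ins for _mm_cvttps_epi32(a.fVec) etc., byte-range values.
        __m128i a = _mm_setr_epi32(  0,  64, 128, 255),
                b = _mm_setr_epi32(  1,   2,   3,   4),
                c = _mm_setr_epi32( 10,  20,  30,  40),
                d = _mm_setr_epi32(250, 251, 252, 253);

        // Two rounds of 16->8 unsigned-saturating packs squeeze sixteen
        // int32s into sixteen bytes, in a,b,c,d order.
        __m128i packed = _mm_packus_epi16(_mm_packus_epi16(a, b),
                                          _mm_packus_epi16(c, d));

        uint8_t bytes[16];
        _mm_storeu_si128((__m128i*)bytes, packed);
        for (int i = 0; i < 16; i++) printf("%d ", bytes[i]);
        printf("\n");
        // Prints: 0 64 128 255 1 2 3 4 10 20 30 40 250 251 252 253
        return 0;
    }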