reland: We can mask load and store with just AVX
Originally reviewed here: https://skia-review.googlesource.com/c/17452/

CQ_INCLUDE_TRYBOTS=skia.primary:Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind

Change-Id: I2e593e897ce93147ec593c2a5de143217274ba2a
Reviewed-on: https://skia-review.googlesource.com/18267
Reviewed-by: Mike Klein <mtklein@chromium.org>
Reviewed-by: Herb Derby <herb@google.com>
Commit-Queue: Mike Klein <mtklein@chromium.org>
parent 8bf1f9ffcf
commit 823103384c
Two file diffs suppressed because they are too large.
@@ -176,8 +176,8 @@ SI void store(T* dst, V v, size_t tail) {
     }
 #endif
 
-// AVX2 adds some mask loads and stores that make for shorter, faster code.
-#if defined(JUMPER) && defined(__AVX2__)
+// AVX adds some mask loads and stores that make for shorter, faster code.
+#if defined(JUMPER) && defined(__AVX__)
     SI U32 mask(size_t tail) {
         // We go a little out of our way to avoid needing large constant values here.
 
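Why this relaxation works: the _mm256_maskload_epi32/_mm256_maskstore_epi32 intrinsics replaced below require AVX2 (they compile to vpmaskmovd), while the _ps variants compile to vmaskmovps, which has been available since the first AVX. A minimal sketch of the distinction, not part of the commit, assuming a Clang or GCC toolchain and <immintrin.h>:

#include <immintrin.h>

// vmaskmovps is an AVX instruction: this compiles with just -mavx.
__m256 load_f32(const float* src, __m256i mask) {
    return _mm256_maskload_ps(src, mask);
}

#if defined(__AVX2__)
// vpmaskmovd arrived with AVX2: the integer variant needs -mavx2.
__m256i load_i32(const int* src, __m256i mask) {
    return _mm256_maskload_epi32(src, mask);
}
#endif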
@@ -186,14 +186,16 @@ SI void store(T* dst, V v, size_t tail) {
         uint64_t mask = 0xffffffffffffffff >> 8*(kStride-tail);
 
         // Sign-extend each mask lane to its full width, 0x00000000 or 0xffffffff.
-        return _mm256_cvtepi8_epi32(_mm_cvtsi64_si128((int64_t)mask));
+        using S8  = int8_t  __attribute__((ext_vector_type(8)));
+        using S32 = int32_t __attribute__((ext_vector_type(8)));
+        return (U32)__builtin_convertvector(unaligned_load<S8>(&mask), S32);
     }
 
     template <>
     inline U32 load(const uint32_t* src, size_t tail) {
         __builtin_assume(tail < kStride);
         if (__builtin_expect(tail, 0)) {
-            return _mm256_maskload_epi32((const int*)src, mask(tail));
+            return (U32)_mm256_maskload_ps((const float*)src, mask(tail));
         }
         return unaligned_load<U32>(src);
     }
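End to end, the trick builds an 8-byte mask (0xff for each live lane, 0x00 for the rest), sign-extends it to 8 32-bit lanes, and hands it to the masked load. The old _mm256_cvtepi8_epi32 sign-extension was itself AVX2-only; __builtin_convertvector lets the compiler pick an AVX-legal sequence instead. A standalone sketch, not part of the commit, assuming Clang (for ext_vector_type and __builtin_convertvector), kStride == 8, and memcpy standing in for Skia's unaligned_load<>:

#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Build with: clang++ -mavx demo.cpp
// One AVX register holds 8 32-bit lanes, hence kStride == 8.
static const size_t kStride = 8;

// Valid for 0 < tail < kStride, matching the if (tail) guard above.
static __m256i mask(size_t tail) {
    // 0xff bytes for the live lanes, 0x00 for the rest.
    uint64_t mask = 0xffffffffffffffff >> 8*(kStride-tail);

    // Sign-extend each byte to a full lane: 0x00000000 or 0xffffffff.
    using S8  = int8_t  __attribute__((ext_vector_type(8)));
    using S32 = int32_t __attribute__((ext_vector_type(8)));
    S8 bytes;
    memcpy(&bytes, &mask, sizeof(bytes));   // stand-in for unaligned_load<S8>
    return (__m256i)__builtin_convertvector(bytes, S32);
}

int main() {
    float src[8] = {1,2,3,4,5,6,7,8};
    // Load just the first 3 lanes; masked-off lanes read back as 0.
    __m256 v = _mm256_maskload_ps(src, mask(3));

    float out[8];
    _mm256_storeu_ps(out, v);
    for (int i = 0; i < 8; i++) { printf("%g ", out[i]); }   // 1 2 3 0 0 0 0 0
    printf("\n");
}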
@@ -202,7 +204,7 @@ SI void store(T* dst, V v, size_t tail) {
     inline void store(uint32_t* dst, U32 v, size_t tail) {
         __builtin_assume(tail < kStride);
         if (__builtin_expect(tail, 0)) {
-            return _mm256_maskstore_epi32((int*)dst, mask(tail), v);
+            return _mm256_maskstore_ps((float*)dst, mask(tail), (F)v);
         }
         unaligned_store(dst, v);
     }
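The store side is symmetric: (F)v bit-casts the integer lanes to float (Clang vector casts reinterpret bits rather than convert values), so the AVX-only _mm256_maskstore_ps applies. Continuing the hypothetical sketch above, with the same mask() helper:

// Reuses mask() from the sketch above; masked-off lanes in dst are
// left untouched in memory, not written as zero.
static void store_tail(float* dst, __m256 v, size_t tail) {
    _mm256_maskstore_ps(dst, mask(tail), v);
}
// e.g. store_tail(dst, _mm256_set1_ps(1.0f), 5) writes only dst[0..4].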