/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SKVX_DEFINED
#define SKVX_DEFINED

// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
//
// This time we're leaning a bit less on platform-specific intrinsics and a bit
// more on Clang/GCC vector extensions, but still keeping the option open to
// drop in platform-specific intrinsics, actually more easily than before.
//
// We've also fixed a few of the caveats that used to make SkNx awkward to work
// with across translation units.  skvx::Vec<N,T> always has N*sizeof(T) size
// and alignment and is safe to use across translation units freely.
// Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.
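//
// A minimal usage sketch (hypothetical values, shown only as documentation;
// everything used here is defined later in this header):
//
//     skvx::Vec<4,float> v = {1,2,3,4};     // lane-by-lane construction
//     v = v * 2.0f + 1.0f;                  // scalars splat across all lanes
//     float first = v[0];                   // first == 3.0f
//     float buf[4];
//     v.store(buf);                         // memcpy out; Load(buf) reads it back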

// Please try to keep this file independent of Skia headers.
#include <algorithm>         // std::min, std::max
#include <cmath>             // std::ceil, std::floor, std::trunc, std::round, std::sqrt, etc.
#include <cstdint>           // intXX_t
#include <cstring>           // memcpy()
#include <initializer_list>  // std::initializer_list

#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
    #include <immintrin.h>
#elif defined(__ARM_NEON)
    #include <arm_neon.h>
#elif defined(__wasm_simd128__)
    #include <wasm_simd128.h>
#endif

// To avoid ODR violations, all methods must be force-inlined...
#if defined(_MSC_VER)
    #define SKVX_ALWAYS_INLINE __forceinline
#else
    #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
#endif

// ... and all standalone functions must be static.  Please use these helpers:
#define SI    static inline
#define SIT   template <       typename T> SI
#define SIN   template <int N             > SI
#define SINT  template <int N, typename T> SI
#define SINTU template <int N, typename T, typename U, \
                        typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI

namespace skvx {

// All Vec have the same simple memory layout, the same as `T vec[N]`.
template <int N, typename T>
struct alignas(N*sizeof(T)) Vec {
    static_assert((N & (N-1)) == 0,        "N must be a power of 2.");
    static_assert(sizeof(T) >= alignof(T), "What kind of crazy T is this?");

    Vec<N/2,T> lo, hi;

    // Methods belong here in the class declaration of Vec only if:
    //   - they must be here, like constructors or operator[];
    //   - they'll definitely never want a specialized implementation.
    // Other operations on Vec should be defined outside the type.

    SKVX_ALWAYS_INLINE Vec() = default;

    template <typename U, typename=std::enable_if_t<std::is_convertible<U,T>::value>>
    SKVX_ALWAYS_INLINE
    Vec(U x) : lo(x), hi(x) {}

    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
        T vals[N] = {0};
        memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));

        lo = Vec<N/2,T>::Load(vals +   0);
        hi = Vec<N/2,T>::Load(vals + N/2);
    }

    SKVX_ALWAYS_INLINE T  operator[](int i) const { return i < N/2 ? lo[i] : hi[i-N/2]; }
    SKVX_ALWAYS_INLINE T& operator[](int i)       { return i < N/2 ? lo[i] : hi[i-N/2]; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};

template <typename T>
struct Vec<1,T> {
    T val;

    SKVX_ALWAYS_INLINE Vec() = default;

    template <typename U, typename=std::enable_if_t<std::is_convertible<U,T>::value>>
    SKVX_ALWAYS_INLINE
    Vec(U x) : val(x) {}

    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}

    SKVX_ALWAYS_INLINE T  operator[](int) const { return val; }
    SKVX_ALWAYS_INLINE T& operator[](int)       { return val; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};

template <typename D, typename S>
SI D unchecked_bit_pun(const S& s) {
    D d;
    memcpy(&d, &s, sizeof(D));
    return d;
}

template <typename D, typename S>
SI D bit_pun(const S& s) {
    static_assert(sizeof(D) == sizeof(S), "");
    return unchecked_bit_pun<D>(s);
}

// Translate from a value type T to its corresponding Mask, the result of a comparison.
template <typename T> struct Mask { using type = T; };
template <> struct Mask<float > { using type = int32_t; };
template <> struct Mask<double> { using type = int64_t; };
template <typename T> using M = typename Mask<T>::type;
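//
// For example (a sketch of how Mask is meant to be read, not additional API):
// comparing two Vec<4,float> produces a Vec<4,int32_t> whose lanes are all-0
// or all-1 bits, ready to feed if_then_else() below:
//
//     Vec<4,float>   a = {1,2,3,4},
//                    b = {4,3,2,1};
//     Vec<4,int32_t> m = (a < b);   // { -1, -1, 0, 0 }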

// Join two Vec<N,T> into one Vec<2N,T>.
SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
    Vec<2*N,T> v;
    v.lo = lo;
    v.hi = hi;
    return v;
}

// We have two default strategies for implementing most operations:
//    1) lean on Clang/GCC vector extensions when available;
//    2) recurse to scalar portable implementations when not.
// At the end we can drop in platform-specific implementations that override either default.

#if !defined(SKNX_NO_SIMD) && (defined(__clang__) || defined(__GNUC__))

    // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
    // N.B. VExt<N,T> alignment is N*alignof(T), stricter than Vec<N,T>'s alignof(T).
    #if defined(__clang__)
        template <int N, typename T>
        using VExt = T __attribute__((ext_vector_type(N)));

    #elif defined(__GNUC__)
        template <int N, typename T>
        struct VExtHelper {
            typedef T __attribute__((vector_size(N*sizeof(T)))) type;
        };

        template <int N, typename T>
        using VExt = typename VExtHelper<N,T>::type;

        // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
        // to_vec<N,T>() below for N=4 and T=float.  This workaround seems to help...
        SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
    #endif

    SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
    SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }

    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) + to_vext(y));
    }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) - to_vext(y));
    }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) * to_vext(y));
    }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) / to_vext(y));
    }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) ^ to_vext(y));
    }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) & to_vext(y));
    }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
        return to_vec<N,T>(to_vext(x) | to_vext(y));
    }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }

    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
    }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
    }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
    }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
    }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) < to_vext(y));
    }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
        return bit_pun<Vec<N,M<T>>>(to_vext(x) > to_vext(y));
    }

#else

    // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
    // We'll implement things portably, in a way that should be easily autovectorizable.

    // N == 1 scalar implementations.
    SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
    SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
    SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }

    SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
    SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
    SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }

    SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
    SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }

    SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
    SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }

    SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val == y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val != y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val <= y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val >= y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val < y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val > y.val ? ~0 : 0;
    }

    // All default N != 1 implementations just recurse on lo and hi halves.
    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo + y.lo, x.hi + y.hi);
    }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo - y.lo, x.hi - y.hi);
    }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo * y.lo, x.hi * y.hi);
    }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo / y.lo, x.hi / y.hi);
    }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo ^ y.lo, x.hi ^ y.hi);
    }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo & y.lo, x.hi & y.hi);
    }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo | y.lo, x.hi | y.hi);
    }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }

    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo == y.lo, x.hi == y.hi);
    }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo != y.lo, x.hi != y.hi);
    }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo <= y.lo, x.hi <= y.hi);
    }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo >= y.lo, x.hi >= y.hi);
    }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo < y.lo, x.hi < y.hi);
    }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo > y.lo, x.hi > y.hi);
    }

#endif

// Some operations we want are not expressible with Clang/GCC vector
// extensions, so we implement them using the recursive approach.

// N == 1 scalar implementations.
SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
    // In practice this scalar implementation is unlikely to be used.  See if_then_else() below.
    return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<1, M<T>>>(e)) );
}

SIT bool any(const Vec<1,T>& x) { return x.val != 0; }
SIT bool all(const Vec<1,T>& x) { return x.val != 0; }

SIT T min(const Vec<1,T>& x) { return x.val; }
SIT T max(const Vec<1,T>& x) { return x.val; }

SIT Vec<1,T> min(const Vec<1,T>& x, const Vec<1,T>& y) { return std::min(x.val, y.val); }
SIT Vec<1,T> max(const Vec<1,T>& x, const Vec<1,T>& y) { return std::max(x.val, y.val); }
SIT Vec<1,T> pow(const Vec<1,T>& x, const Vec<1,T>& y) { return std::pow(x.val, y.val); }

SIT Vec<1,T>  atan(const Vec<1,T>& x) { return std:: atan(x.val); }
SIT Vec<1,T>  ceil(const Vec<1,T>& x) { return std:: ceil(x.val); }
SIT Vec<1,T> floor(const Vec<1,T>& x) { return std::floor(x.val); }
SIT Vec<1,T> trunc(const Vec<1,T>& x) { return std::trunc(x.val); }
SIT Vec<1,T> round(const Vec<1,T>& x) { return std::round(x.val); }
SIT Vec<1,T>  sqrt(const Vec<1,T>& x) { return std:: sqrt(x.val); }
SIT Vec<1,T>   abs(const Vec<1,T>& x) { return std:: abs(x.val); }
SIT Vec<1,T>   sin(const Vec<1,T>& x) { return std:: sin(x.val); }
SIT Vec<1,T>   cos(const Vec<1,T>& x) { return std:: cos(x.val); }
SIT Vec<1,T>   tan(const Vec<1,T>& x) { return std:: tan(x.val); }

SIT Vec<1,int> lrint(const Vec<1,T>& x) { return (int)std::lrint(x.val); }

SIT Vec<1,T>   rcp(const Vec<1,T>& x) { return 1 / x.val; }
SIT Vec<1,T> rsqrt(const Vec<1,T>& x) { return rcp(sqrt(x)); }

// All default N != 1 implementations just recurse on lo and hi halves.
SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    // Specializations inline here so they can generalize what types they apply to.
    // (This header is used in C++14 contexts, so we have to kind of fake constexpr if.)
#if defined(__AVX2__)
    if /*constexpr*/ (N*sizeof(T) == 32) {
        return unchecked_bit_pun<Vec<N,T>>(_mm256_blendv_epi8(unchecked_bit_pun<__m256i>(e),
                                                              unchecked_bit_pun<__m256i>(t),
                                                              unchecked_bit_pun<__m256i>(cond)));
    }
#endif
#if defined(__SSE4_1__)
    if /*constexpr*/ (N*sizeof(T) == 16) {
        return unchecked_bit_pun<Vec<N,T>>(_mm_blendv_epi8(unchecked_bit_pun<__m128i>(e),
                                                           unchecked_bit_pun<__m128i>(t),
                                                           unchecked_bit_pun<__m128i>(cond)));
    }
#endif
#if defined(__ARM_NEON)
    if /*constexpr*/ (N*sizeof(T) == 16) {
        return unchecked_bit_pun<Vec<N,T>>(vbslq_u8(unchecked_bit_pun<uint8x16_t>(cond),
                                                    unchecked_bit_pun<uint8x16_t>(t),
                                                    unchecked_bit_pun<uint8x16_t>(e)));
    }
#endif
    // Recurse for large vectors to try to hit the specializations above.
    if /*constexpr*/ (N*sizeof(T) > 16) {
        return join(if_then_else(cond.lo, t.lo, e.lo),
                    if_then_else(cond.hi, t.hi, e.hi));
    }
    // This default can lead to better code than recursing onto scalars.
    return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<N, M<T>>>(e)) );
}
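
// For instance (an illustrative sketch, not additional API): a lanewise absolute
// value of a float vector can be written with a comparison mask and if_then_else():
//
//     Vec<4,float> x  = {1.0f, -2.0f, 3.0f, -4.0f};
//     Vec<4,float> ax = if_then_else(x < 0.0f, -x, x);   // {1,2,3,4}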

SINT bool any(const Vec<N,T>& x) { return any(x.lo) || any(x.hi); }
SINT bool all(const Vec<N,T>& x) { return all(x.lo) && all(x.hi); }

SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }

SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(min(x.lo, y.lo), min(x.hi, y.hi));
}
SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(max(x.lo, y.lo), max(x.hi, y.hi));
}
SINT Vec<N,T> pow(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(pow(x.lo, y.lo), pow(x.hi, y.hi));
}

SINT Vec<N,T>  atan(const Vec<N,T>& x) { return join( atan(x.lo),  atan(x.hi)); }
SINT Vec<N,T>  ceil(const Vec<N,T>& x) { return join( ceil(x.lo),  ceil(x.hi)); }
SINT Vec<N,T> floor(const Vec<N,T>& x) { return join(floor(x.lo), floor(x.hi)); }
SINT Vec<N,T> trunc(const Vec<N,T>& x) { return join(trunc(x.lo), trunc(x.hi)); }
SINT Vec<N,T> round(const Vec<N,T>& x) { return join(round(x.lo), round(x.hi)); }
SINT Vec<N,T>  sqrt(const Vec<N,T>& x) { return join( sqrt(x.lo),  sqrt(x.hi)); }
SINT Vec<N,T>   abs(const Vec<N,T>& x) { return join(  abs(x.lo),   abs(x.hi)); }
SINT Vec<N,T>   sin(const Vec<N,T>& x) { return join(  sin(x.lo),   sin(x.hi)); }
SINT Vec<N,T>   cos(const Vec<N,T>& x) { return join(  cos(x.lo),   cos(x.hi)); }
SINT Vec<N,T>   tan(const Vec<N,T>& x) { return join(  tan(x.lo),   tan(x.hi)); }

SINT Vec<N,int> lrint(const Vec<N,T>& x) { return join(lrint(x.lo), lrint(x.hi)); }

SINT Vec<N,T>   rcp(const Vec<N,T>& x) { return join(  rcp(x.lo),   rcp(x.hi)); }
SINT Vec<N,T> rsqrt(const Vec<N,T>& x) { return join(rsqrt(x.lo), rsqrt(x.hi)); }

// Scalar/vector operations just splat the scalar to a vector...
SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) +  y; }
SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) -  y; }
SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) *  y; }
SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) /  y; }
SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^  y; }
SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) &  y; }
SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) |  y; }
SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) <  y; }
SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) >  y; }
SINTU Vec<N,T>           min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
SINTU Vec<N,T>           max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
SINTU Vec<N,T>           pow(U x, const Vec<N,T>& y) { return pow(Vec<N,T>(x), y); }

// ... and same deal for vector/scalar operations.
SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) { return x +  Vec<N,T>(y); }
SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) { return x -  Vec<N,T>(y); }
SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) { return x *  Vec<N,T>(y); }
SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) { return x /  Vec<N,T>(y); }
SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) { return x ^  Vec<N,T>(y); }
SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) { return x &  Vec<N,T>(y); }
SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) { return x |  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x <  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x >  Vec<N,T>(y); }
SINTU Vec<N,T>           min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
SINTU Vec<N,T>           max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
SINTU Vec<N,T>           pow(const Vec<N,T>& x, U y) { return pow(x, Vec<N,T>(y)); }

// The various op= operators, for vectors...
SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }

// ... for scalars...
SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }

// ... and for shifts.
SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }

// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
template <typename D, typename S>
SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }

template <typename D, int N, typename S>
SI Vec<N,D> cast(const Vec<N,S>& src) {
#if !defined(SKNX_NO_SIMD) && defined(__clang__)
    return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
#else
    return join(cast<D>(src.lo), cast<D>(src.hi));
#endif
}
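
// A small worked example (illustrative only): casting floats to ints truncates
// toward zero, lane by lane, exactly like a C-style cast would:
//
//     Vec<4,float> f = {1.9f, -1.9f, 2.0f, 0.5f};
//     Vec<4,int>   i = cast<int>(f);               // {1, -1, 2, 0}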

// Shuffle values from a vector pretty arbitrarily:
//    skvx::Vec<4,float> rgba = {R,G,B,A};
//    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
//    shuffle<2,1>            (rgba) ~> {B,G}
//    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
//    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
// The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
#if !defined(SKNX_NO_SIMD) && defined(__clang__)
    return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
#else
    return { x[Ix]... };
#endif
}

// fma() delivers a fused mul-add, even if that's really expensive.
SI Vec<1,float> fma(const Vec<1,float>& x, const Vec<1,float>& y, const Vec<1,float>& z) {
    return std::fma(x.val, y.val, z.val);
}
SIN Vec<N,float> fma(const Vec<N,float>& x, const Vec<N,float>& y, const Vec<N,float>& z) {
    return join(fma(x.lo, y.lo, z.lo),
                fma(x.hi, y.hi, z.hi));
}

SIN Vec<N,float> fract(const Vec<N,float>& x) {
    return x - floor(x);
}
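
// For example (just a worked check of the definition above): fract(2.75f) == 0.75f,
// and fract(-0.25f) == 0.75f too, since floor(-0.25f) is -1.0f and -0.25f - (-1.0f) = 0.75f.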

// The default cases for to_half/from_half are borrowed from skcms,
// and assume inputs are finite and treat/flush denorm half floats as/to zero.
// Key constants to watch for:
//   - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
//   - a half  is 16-bit, 1-5-10 sign-exponent-mantissa, with  15 exponent bias.
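//
// As a worked example of the bit arithmetic below (normal values only):
// 1.0f is 0x3f80'0000 (sign 0, exponent 127, mantissa 0).  Shifting the
// exponent/mantissa bits down by 13 gives 0x1fc00, and subtracting the
// rebias constant (127-15)<<10 = 0x1c000 leaves 0x3c00, which is exactly
// the half-precision encoding of 1.0.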
SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
    Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
                    s   = sem & 0x8000'0000,
                    em  = sem ^ s,
              is_denorm = em < 0x3880'0000;
    return cast<uint16_t>(if_then_else(is_denorm, Vec<N,uint32_t>(0)
                                                , (s>>16) + (em>>13) - ((127-15)<<10)));
}
SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
    Vec<N,uint32_t> wide = cast<uint32_t>(x),
                    s    = wide & 0x8000,
                    em   = wide ^ s;
    auto is_denorm = bit_pun<Vec<N,int32_t>>(em < 0x0400);
    return if_then_else(is_denorm, Vec<N,float>(0)
                                 , bit_pun<Vec<N,float>>( (s<<16) + (em<<13) + ((127-15)<<23) ));
}

// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
SI Vec<1,uint16_t> to_half(const Vec<1,float>&    x) { return   to_half_finite_ftz(x); }
SI Vec<1,float>  from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }

SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
#if defined(__F16C__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(unchecked_bit_pun<__m256>(x),
                                                                  _MM_FROUND_CUR_DIRECTION));
    }
#endif
#if defined(__aarch64__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(unchecked_bit_pun<float32x4_t>(x)));
    }
#endif
    if /*constexpr*/ (N > 4) {
        return join(to_half(x.lo),
                    to_half(x.hi));
    }
    return to_half_finite_ftz(x);
}

SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
#if defined(__F16C__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,float>>(_mm256_cvtph_ps(unchecked_bit_pun<__m128i>(x)));
    }
#endif
#if defined(__aarch64__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,float>>(vcvt_f32_f16(unchecked_bit_pun<float16x4_t>(x)));
    }
#endif
    if /*constexpr*/ (N > 4) {
        return join(from_half(x.lo),
                    from_half(x.hi));
    }
    return from_half_finite_ftz(x);
}

// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
    return cast<uint8_t>( (x+127)/255 );
}
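
// A quick worked check of the rounding (illustrative only): for the largest input
// 255*255 = 65025, (65025 + 127) / 255 = 65152 / 255 = 255 in integer division,
// and for 127*255 = 32385, (32385 + 127) / 255 = 32512 / 255 = 127, both as expected.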

// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
// and is always perfect when x or y is 0 or 255.
SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
    // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
    // We happen to have historically picked (x*y+x)/256.
    auto X = cast<uint16_t>(x),
         Y = cast<uint16_t>(y);
    return cast<uint8_t>( (X*Y+X)/256 );
}
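
// To see the "perfect at 255" claim concretely (a worked check, not extra API):
// with x = 255 the formula gives (255*y + 255)/256 = 255*(y+1)/256, which for any
// y in [0,255] floors to exactly y, the same value div255(255*y) produces.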

#if !defined(SKNX_NO_SIMD) && defined(__ARM_NEON)
    // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
    SI Vec<8,uint16_t> mull(const Vec<8,uint8_t>& x,
                            const Vec<8,uint8_t>& y) {
        return to_vec<8,uint16_t>(vmull_u8(to_vext(x),
                                           to_vext(y)));
    }

    SIN std::enable_if_t<(N < 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x,
                                                        const Vec<N,uint8_t>& y) {
        // N < 8 --> double up data until N == 8, returning the part we need.
        return mull(join(x,x),
                    join(y,y)).lo;
    }

    SIN std::enable_if_t<(N > 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x,
                                                        const Vec<N,uint8_t>& y) {
        // N > 8 --> usual join(lo,hi) strategy to recurse down to N == 8.
        return join(mull(x.lo, y.lo),
                    mull(x.hi, y.hi));
    }
#else
    // Nothing special when we don't have NEON... just cast up to 16-bit and multiply.
    SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
                             const Vec<N,uint8_t>& y) {
        return cast<uint16_t>(x)
             * cast<uint16_t>(y);
    }
#endif
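
// Whichever backend is used, mull() widens before multiplying, so the full product
// survives (a usage sketch with made-up values):
//
//     Vec<8,uint8_t>  a = Vec<8,uint8_t>(200),
//                     b = Vec<8,uint8_t>(200);
//     Vec<8,uint16_t> p = mull(a, b);   // every lane is 40000, no 8-bit overflow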

#if !defined(SKNX_NO_SIMD)

// Platform-specific specializations and overloads can now drop in here.

#if defined(__AVX__)
    SI Vec<8,float> sqrt(const Vec<8,float>& x) {
        return bit_pun<Vec<8,float>>(_mm256_sqrt_ps(bit_pun<__m256>(x)));
    }
    SI Vec<8,float> rsqrt(const Vec<8,float>& x) {
        return bit_pun<Vec<8,float>>(_mm256_rsqrt_ps(bit_pun<__m256>(x)));
    }
    SI Vec<8,float> rcp(const Vec<8,float>& x) {
        return bit_pun<Vec<8,float>>(_mm256_rcp_ps(bit_pun<__m256>(x)));
    }
    SI Vec<8,int> lrint(const Vec<8,float>& x) {
        return bit_pun<Vec<8,int>>(_mm256_cvtps_epi32(bit_pun<__m256>(x)));
    }
#endif

#if defined(__SSE__)
    SI Vec<4,float> sqrt(const Vec<4,float>& x) {
        return bit_pun<Vec<4,float>>(_mm_sqrt_ps(bit_pun<__m128>(x)));
    }
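    // Note: _mm_rsqrt_ps() and _mm_rcp_ps() below are fast hardware approximations
    // (roughly 12 bits of relative precision), not exact 1/sqrt(x) or 1/x.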
    SI Vec<4,float> rsqrt(const Vec<4,float>& x) {
        return bit_pun<Vec<4,float>>(_mm_rsqrt_ps(bit_pun<__m128>(x)));
    }
    SI Vec<4,float> rcp(const Vec<4,float>& x) {
        return bit_pun<Vec<4,float>>(_mm_rcp_ps(bit_pun<__m128>(x)));
    }
    SI Vec<4,int> lrint(const Vec<4,float>& x) {
        return bit_pun<Vec<4,int>>(_mm_cvtps_epi32(bit_pun<__m128>(x)));
    }

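    // There is no native 2-lane float vector, so these run the 4-lane version
    // on a doubled-up copy (lanes 0,1,0,1) and then keep just the first two lanes.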
    SI Vec<2,float> sqrt(const Vec<2,float>& x) {
        return shuffle<0,1>( sqrt(shuffle<0,1,0,1>(x)));
    }
    SI Vec<2,float> rsqrt(const Vec<2,float>& x) {
        return shuffle<0,1>(rsqrt(shuffle<0,1,0,1>(x)));
    }
    SI Vec<2,float> rcp(const Vec<2,float>& x) {
        return shuffle<0,1>( rcp(shuffle<0,1,0,1>(x)));
    }
    SI Vec<2,int> lrint(const Vec<2,float>& x) {
        return shuffle<0,1>(lrint(shuffle<0,1,0,1>(x)));
    }
#endif

#if defined(__AVX2__)
    SI Vec<4,float> fma(const Vec<4,float>& x, const Vec<4,float>& y, const Vec<4,float>& z) {
        return bit_pun<Vec<4,float>>(_mm_fmadd_ps(bit_pun<__m128>(x),
                                                  bit_pun<__m128>(y),
                                                  bit_pun<__m128>(z)));
    }

    SI Vec<8,float> fma(const Vec<8,float>& x, const Vec<8,float>& y, const Vec<8,float>& z) {
        return bit_pun<Vec<8,float>>(_mm256_fmadd_ps(bit_pun<__m256>(x),
                                                     bit_pun<__m256>(y),
                                                     bit_pun<__m256>(z)));
    }
#elif defined(__aarch64__)
    SI Vec<4,float> fma(const Vec<4,float>& x, const Vec<4,float>& y, const Vec<4,float>& z) {
        // These instructions tend to work like z += xy, so the order here is z,x,y.
        return bit_pun<Vec<4,float>>(vfmaq_f32(bit_pun<float32x4_t>(z),
                                               bit_pun<float32x4_t>(x),
                                               bit_pun<float32x4_t>(y)));
    }
#endif
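
// On either path, fma(x,y,z) computes x*y + z with a single rounding: the x*y
// product is not rounded before the add, matching std::fma() semantics.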

// WASM SIMD compatible operations which are not automatically compiled to SIMD commands
// by emscripten:
#if defined __wasm_simd128__
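    // These fall back on ordinary division and sqrt(), so they are full precision
    // rather than the fast approximations the x86 versions above provide.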
    SI Vec<4, float> rcp  (const Vec<4, float>& x) { return 1.0f / x; }
    SI Vec<2,double> rcp  (const Vec<2,double>& x) { return 1.0f / x; }
    SI Vec<4, float> rsqrt(const Vec<4, float>& x) { return 1.0f / sqrt(x); }
    SI Vec<2,double> rsqrt(const Vec<2,double>& x) { return 1.0f / sqrt(x); }
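
    // to_vec() and to_vext() (defined earlier in this header) convert between
    // skvx::Vec and the Clang vector-extension type that the wasm_* intrinsics use.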
    SI Vec<4,float> min(const Vec<4,float>& x, const Vec<4,float>& y) {
        return to_vec<4,float>(wasm_f32x4_min(to_vext(x), to_vext(y)));
    }
    SI Vec<4,float> max(const Vec<4,float>& x, const Vec<4,float>& y) {
        return to_vec<4,float>(wasm_f32x4_max(to_vext(x), to_vext(y)));
    }
    SI Vec<4,float> sqrt(const Vec<4,float>& x) {
        return to_vec<4,float>(wasm_f32x4_sqrt(to_vext(x)));
    }
    SI Vec<4,float> abs(const Vec<4,float>& x) {
        return to_vec<4,float>(wasm_f32x4_abs(to_vext(x)));
    }

    SI Vec<2,double> min(const Vec<2,double>& x, const Vec<2,double>& y) {
        return to_vec<2,double>(wasm_f64x2_min(to_vext(x), to_vext(y)));
    }
    SI Vec<2,double> max(const Vec<2,double>& x, const Vec<2,double>& y) {
        return to_vec<2,double>(wasm_f64x2_max(to_vext(x), to_vext(y)));
    }
    SI Vec<2,double> sqrt(const Vec<2,double>& x) {
        return to_vec<2,double>(wasm_f64x2_sqrt(to_vext(x)));
    }
    SI Vec<2,double> abs(const Vec<2,double>& x) {
        return to_vec<2,double>(wasm_f64x2_abs(to_vext(x)));
    }

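    // any() is true if any lane is non-zero; all() only if every lane is non-zero.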
    SI bool any(const Vec<4, int32_t>& x) { return wasm_i32x4_any_true(to_vext(x)); }
    SI bool any(const Vec<4,uint32_t>& x) { return wasm_i32x4_any_true(to_vext(x)); }
    SI bool all(const Vec<4, int32_t>& x) { return wasm_i32x4_all_true(to_vext(x)); }
    SI bool all(const Vec<4,uint32_t>& x) { return wasm_i32x4_all_true(to_vext(x)); }

    SI Vec<4,int32_t> min(const Vec<4,int32_t>& x, const Vec<4,int32_t>& y) {
        return to_vec<4,int32_t>(wasm_i32x4_min(to_vext(x), to_vext(y)));
    }
    SI Vec<4,int32_t> max(const Vec<4,int32_t>& x, const Vec<4,int32_t>& y) {
        return to_vec<4,int32_t>(wasm_i32x4_max(to_vext(x), to_vext(y)));
    }
    SI Vec<4,int32_t> abs(const Vec<4,int32_t>& x) {
        return to_vec<4,int32_t>(wasm_i32x4_abs(to_vext(x)));
    }

    SI Vec<4,uint32_t> min(const Vec<4,uint32_t>& x, const Vec<4,uint32_t>& y) {
        return to_vec<4,uint32_t>(wasm_u32x4_min(to_vext(x), to_vext(y)));
    }
    SI Vec<4,uint32_t> max(const Vec<4,uint32_t>& x, const Vec<4,uint32_t>& y) {
        return to_vec<4,uint32_t>(wasm_u32x4_max(to_vext(x), to_vext(y)));
    }
#endif

#endif // !defined(SKNX_NO_SIMD)

} // namespace skvx

#undef SINTU
#undef SINT
#undef SIT
#undef SI

#endif//SKVX_DEFINED