Expand _01 half<->float limitation to _finite. Simplify.

It's become clear we sometimes need to deal with values <0 or >1.
I'm not yet convinced we care about NaN or +-inf.

We had some fairly clever tricks and optimizations here for NEON
and SSE. I've thrown them out in favor of a single implementation.
If we find the specializations mattered, we can certainly figure out
how to extend them to this new range/domain.

This happens to add a vectorized float -> half for ARMv7, which was
missing from the _01 version. (The SSE strategy was not portable to
platforms that flush denorm floats to zero.)
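(A hedged aside on why that strategy breaks, with assumed values, not code
from this CL: the old SSE float -> half path rebiased exponents by multiplying
by 2^-112, so any input small enough to need a denorm half produced a denormal
float intermediate, which flush-to-zero hardware silently turns into 0.)

    // Hypothetical illustration only:
    float f      = 1e-5f;          // < 2^-14, representable only as a denorm half
    float scaled = f * 0x1p-112f;  // the old rebias multiply: the result is below
                                   // 2^-126, a denormal float -- 0 under FTZ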

I've tested the full float range for FloatToHalf on my desktop and a 5x.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2145663003
CQ_INCLUDE_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot;master.client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review-Url: https://codereview.chromium.org/2145663003
mtklein 2016-07-14 11:02:09 -07:00 committed by Commit bot
parent 7cf36ccb44
commit 3296bee70d
11 changed files with 153 additions and 108 deletions

View File

@@ -601,7 +601,7 @@ SkColor SkBitmap::getColor(int x, int y) const {
}
case kRGBA_F16_SkColorType: {
const uint64_t* addr = (const uint64_t*)fPixels + y * (fRowBytes >> 3) + x;
Sk4f p4 = SkHalfToFloat_01(addr[0]);
Sk4f p4 = SkHalfToFloat_finite(addr[0]);
if (p4[3]) {
float inva = 1 / p4[3];
p4 = p4 * Sk4f(inva, inva, inva, 1);
@@ -1145,7 +1145,7 @@ bool SkBitmap::ReadRawPixels(SkReadBuffer* buffer, SkBitmap* bitmap) {
SkImageInfo info;
info.unflatten(*buffer);
// If there was an error reading "info" or if it is bogus,
// don't use it to compute minRowBytes()
if (!buffer->validate(SkColorTypeValidateAlphaType(info.colorType(),
info.alphaType()))) {

View File

@@ -24,10 +24,10 @@ typedef uint16_t SkHalf;
float SkHalfToFloat(SkHalf h);
SkHalf SkFloatToHalf(float f);
// Convert between half and single precision floating point, but pull any dirty
// trick we can to make it faster as long as it's correct enough for values in [0,1].
static inline Sk4f SkHalfToFloat_01(uint64_t);
static inline uint64_t SkFloatToHalf_01(const Sk4f&);
// Convert between half and single precision floating point,
// assuming inputs and outputs are both finite.
static inline Sk4f SkHalfToFloat_finite(uint64_t);
static inline uint64_t SkFloatToHalf_finite(const Sk4f&);
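A minimal usage sketch of the new pair (hypothetical values; lane 0 rides in the low 16 bits, matching the scalar fallback below):
// Hypothetical usage, not part of this header:
uint64_t halfs = 0x4400420040003C00ULL;        // 4.0h | 3.0h | 2.0h | 1.0h
Sk4f floats = SkHalfToFloat_finite(halfs);     // { 1.0f, 2.0f, 3.0f, 4.0f }
uint64_t back = SkFloatToHalf_finite(floats);  // exact halfs round-trip unchanged
SkASSERT(back == halfs);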
// ~~~~~~~~~~~ impl ~~~~~~~~~~~~~~ //
@@ -36,7 +36,7 @@ static inline uint64_t SkFloatToHalf_01(const Sk4f&);
// GCC 4.9 lacks the intrinsics to use ARMv8 f16<->f32 instructions, so we use inline assembly.
static inline Sk4f SkHalfToFloat_01(uint64_t hs) {
static inline Sk4f SkHalfToFloat_finite(uint64_t hs) {
#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64)
float32x4_t fs;
asm ("fmov %d[fs], %[hs] \n" // vcreate_f16(hs)
@@ -44,53 +44,28 @@ static inline Sk4f SkHalfToFloat_01(uint64_t hs) {
: [fs] "=w" (fs) // =w: write-only NEON register
: [hs] "r" (hs)); // r: read-only 64-bit general register
return fs;
#elif !defined(SKNX_NO_SIMD) && defined(SK_ARM_HAS_NEON)
// NEON makes this pretty easy:
// - denormals are 10-bit * 2^-14 == 24-bit fixed point;
// - handle normals the same way as in SSE: align mantissa, then rebias exponent.
uint32x4_t h = vmovl_u16(vcreate_u16(hs)),
is_denorm = vcltq_u32(h, vdupq_n_u32(1<<10));
float32x4_t denorm = vcvtq_n_f32_u32(h, 24),
norm = vreinterpretq_f32_u32(vaddq_u32(vshlq_n_u32(h, 13),
vdupq_n_u32((127-15) << 23)));
return vbslq_f32(is_denorm, denorm, norm);
#elif !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
// If our input is a normal 16-bit float, things are pretty easy:
// - shift left by 13 to put the mantissa in the right place;
// - the exponent is wrong, but it just needs to be rebiased;
// - re-bias the exponent from 15-bias to 127-bias by adding (127-15).
// If our input is denormalized, we're going to do the same steps, plus a few more fix ups:
// - the input is h = K*2^-14, for some 10-bit fixed point K in [0,1);
// - by shifting left 13 and adding (127-15) to the exponent, we constructed the float value
// 2^-15*(1+K);
// - we'd need to subtract 2^-15 and multiply by 2 to get back to K*2^-14, or equivalently
// multiply by 2 then subtract 2^-14.
//
// - We'll work that multiply by 2 into the rebias, by adding 1 more to the exponent.
// - Conveniently, this leaves that rebias constant 2^-14, exactly what we want to subtract.
__m128i h = _mm_unpacklo_epi16(_mm_loadl_epi64((const __m128i*)&hs), _mm_setzero_si128());
const __m128i is_denorm = _mm_cmplt_epi32(h, _mm_set1_epi32(1<<10));
__m128i rebias = _mm_set1_epi32((127-15) << 23);
rebias = _mm_add_epi32(rebias, _mm_and_si128(is_denorm, _mm_set1_epi32(1<<23)));
__m128i f = _mm_add_epi32(_mm_slli_epi32(h, 13), rebias);
return _mm_sub_ps(_mm_castsi128_ps(f),
_mm_castsi128_ps(_mm_and_si128(is_denorm, rebias)));
#else
float fs[4];
for (int i = 0; i < 4; i++) {
fs[i] = SkHalfToFloat(hs >> (i*16));
}
return Sk4f::Load(fs);
Sk4i bits = SkNx_cast<int>(Sk4h::Load(&hs)), // Expand to 32 bit.
sign = bits & 0x00008000, // Save the sign bit for later...
positive = bits ^ sign, // ...but strip it off for now.
is_denorm = positive < (1<<10); // Exponent == 0?
// For normal half floats, extend the mantissa by 13 zero bits,
// then adjust the exponent from 15 bias to 127 bias.
Sk4i norm = (positive << 13) + ((127 - 15) << 23);
// For denorm half floats, mask in the exponent-only float K that turns our
// denorm value V*2^-14 into a normalized float K + V*2^-14. Then subtract off K.
const Sk4i K = ((127-15) + (23-10) + 1) << 23;
Sk4i mask_K = positive | K;
Sk4f denorm = Sk4f::Load(&mask_K) - Sk4f::Load(&K);
Sk4i merged = (sign << 16) | is_denorm.thenElse(Sk4i::Load(&denorm), norm);
return Sk4f::Load(&merged);
#endif
}
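For reference, a scalar sketch of the same bit trick (a hypothetical standalone helper, not part of this CL):
#include <cstdint>
#include <cstring>
// Hypothetical scalar equivalent of the portable vector path above.
static inline float half_to_float_finite_scalar(uint16_t h) {
    uint32_t sign     = h & 0x8000,              // Save the sign bit for later...
             positive = h ^ sign;                // ...but strip it off for now.
    uint32_t bits;
    float f;
    if (positive < (1<<10)) {                    // Exponent == 0: denorm, value M*2^-24.
        // OR the 10 mantissa bits into the exponent-only float K = 0.5f, making
        // the normalized float 0.5 + M*2^-24, then subtract K back off.
        const uint32_t K = ((127-15) + (23-10) + 1) << 23;   // 0.5f as bits
        uint32_t masked = positive | K;
        float mf, kf;
        memcpy(&mf, &masked, 4);
        memcpy(&kf, &K, 4);
        f = mf - kf;
    } else {
        // Normal half: extend the mantissa by 13 zero bits, rebias exponent 15 -> 127.
        bits = (positive << 13) + ((127-15) << 23);
        memcpy(&f, &bits, 4);
    }
    memcpy(&bits, &f, 4);
    bits |= sign << 16;                          // Put the sign back in the top bit.
    memcpy(&f, &bits, 4);
    return f;
}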
static inline uint64_t SkFloatToHalf_01(const Sk4f& fs) {
static inline uint64_t SkFloatToHalf_finite(const Sk4f& fs) {
uint64_t r;
#if !defined(SKNX_NO_SIMD) && defined(SK_CPU_ARM64)
float32x4_t vec = fs.fVec;
@@ -98,25 +98,25 @@ static inline uint64_t SkFloatToHalf_01(const Sk4f& fs) {
"fmov %[r], %d[vec] \n" // vst1_f16(&r, ...)
: [r] "=r" (r) // =r: write-only 64-bit general register
, [vec] "+w" (vec)); // +w: read-write NEON register
// TODO: ARMv7 NEON float->half?
#elif !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
// Scale down from 127-bias to 15-bias, then cut off bottom 13 mantissa bits.
// This doesn't round, so it can be 1 bit too small.
const __m128 rebias = _mm_castsi128_ps(_mm_set1_epi32((127 - (127-15)) << 23));
__m128i h = _mm_srli_epi32(_mm_castps_si128(_mm_mul_ps(fs.fVec, rebias)), 13);
_mm_storel_epi64((__m128i*)&r, _mm_packs_epi32(h,h));
#else
SkHalf hs[4];
for (int i = 0; i < 4; i++) {
hs[i] = SkFloatToHalf(fs[i]);
}
r = (uint64_t)hs[3] << 48
| (uint64_t)hs[2] << 32
| (uint64_t)hs[1] << 16
| (uint64_t)hs[0] << 0;
Sk4i bits = Sk4i::Load(&fs),
sign = bits & 0x80000000, // Save the sign bit for later...
positive = bits ^ sign, // ...but strip it off for now.
will_be_denorm = positive < ((127-15+1) << 23); // positive < smallest normal half?
// For normal half floats, adjust the exponent from 127 bias to 15 bias,
// then drop the bottom 13 mantissa bits.
Sk4i norm = (positive - ((127 - 15) << 23)) >> 13;
// This mechanically inverts the denorm half -> normal float conversion above.
// Knowing that and reading its explanation will leave you feeling more confident
// than reading my best attempt at explaining this directly.
const Sk4i K = ((127-15) + (23-10) + 1) << 23;
Sk4f plus_K = Sk4f::Load(&positive) + Sk4f::Load(&K);
Sk4i denorm = Sk4i::Load(&plus_K) ^ K;
Sk4i merged = (sign >> 16) | will_be_denorm.thenElse(denorm, norm);
SkNx_cast<uint16_t>(merged).store(&r);
#endif
return r;
}
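And its mirror image (again a hypothetical sketch; like the vector code, the normal path truncates rather than rounds, so it can be 1 bit too small):
#include <cstdint>
#include <cstring>
// Hypothetical scalar equivalent of the portable vector path above.
static inline uint16_t float_to_half_finite_scalar(float f) {
    uint32_t bits;
    memcpy(&bits, &f, 4);
    uint32_t sign     = bits & 0x80000000,       // Save the sign bit for later...
             positive = bits ^ sign,             // ...but strip it off for now.
             h;
    if (positive < (uint32_t)((127-15+1) << 23)) {   // |f| < 2^-14: will be denorm.
        // Invert the trick above: adding K = 0.5f as floats lands the value's bits
        // in K's low mantissa bits; XOR-ing K's bits off leaves just those.
        const uint32_t K = ((127-15) + (23-10) + 1) << 23;
        float pf, kf, sum;
        memcpy(&pf, &positive, 4);
        memcpy(&kf, &K, 4);
        sum = pf + kf;
        memcpy(&h, &sum, 4);
        h ^= K;
    } else {
        // Normal half: rebias exponent 127 -> 15, drop the low 13 mantissa bits.
        h = (positive - ((127-15) << 23)) >> 13;
    }
    return (uint16_t)((sign >> 16) | h);
}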

View File

@@ -198,7 +198,7 @@ public:
PixelConverter(const SkPixmap& srcPixmap) { }
Sk4f toSk4f(const Element pixel) const {
return SkHalfToFloat_01(pixel);
return SkHalfToFloat_finite(pixel);
}
};

View File

@@ -85,10 +85,10 @@ struct ColorTypeFilter_8 {
struct ColorTypeFilter_F16 {
typedef uint64_t Type; // SkHalf x4
static Sk4f Expand(uint64_t x) {
return SkHalfToFloat_01(x);
return SkHalfToFloat_finite(x);
}
static uint64_t Compact(const Sk4f& x) {
return SkFloatToHalf_01(x);
return SkFloatToHalf_finite(x);
}
};

View File

@@ -42,7 +42,7 @@ static void load_f16(const SkPixmap& src, int x, int y, SkPM4f span[], int count
SkASSERT(src.addr64(x + count - 1, y));
for (int i = 0; i < count; ++i) {
SkHalfToFloat_01(addr[i]).store(span[i].fVec);
SkHalfToFloat_finite(addr[i]).store(span[i].fVec);
}
}

View File

@@ -22,16 +22,16 @@ static void xfer_1(const SkXfermode* xfer, uint64_t dst[], const SkPM4f* src, in
SkPM4f d;
if (aa) {
for (int i = 0; i < count; ++i) {
Sk4f d4 = SkHalfToFloat_01(dst[i]);
Sk4f d4 = SkHalfToFloat_finite(dst[i]);
d4.store(d.fVec);
Sk4f r4 = Sk4f::Load(proc(*src, d).fVec);
dst[i] = SkFloatToHalf_01(lerp_by_coverage(r4, d4, aa[i]));
dst[i] = SkFloatToHalf_finite(lerp_by_coverage(r4, d4, aa[i]));
}
} else {
for (int i = 0; i < count; ++i) {
SkHalfToFloat_01(dst[i]).store(d.fVec);
SkHalfToFloat_finite(dst[i]).store(d.fVec);
Sk4f r4 = Sk4f::Load(proc(*src, d).fVec);
dst[i] = SkFloatToHalf_01(r4);
dst[i] = SkFloatToHalf_finite(r4);
}
}
}
@@ -42,16 +42,16 @@ static void xfer_n(const SkXfermode* xfer, uint64_t dst[], const SkPM4f src[], i
SkPM4f d;
if (aa) {
for (int i = 0; i < count; ++i) {
Sk4f d4 = SkHalfToFloat_01(dst[i]);
Sk4f d4 = SkHalfToFloat_finite(dst[i]);
d4.store(d.fVec);
Sk4f r4 = Sk4f::Load(proc(src[i], d).fVec);
dst[i] = SkFloatToHalf_01(lerp_by_coverage(r4, d4, aa[i]));
dst[i] = SkFloatToHalf_finite(lerp_by_coverage(r4, d4, aa[i]));
}
} else {
for (int i = 0; i < count; ++i) {
SkHalfToFloat_01(dst[i]).store(d.fVec);
SkHalfToFloat_finite(dst[i]).store(d.fVec);
Sk4f r4 = Sk4f::Load(proc(src[i], d).fVec);
dst[i] = SkFloatToHalf_01(r4);
dst[i] = SkFloatToHalf_finite(r4);
}
}
}
@@ -64,8 +64,8 @@ static void clear(const SkXfermode*, uint64_t dst[], const SkPM4f*, int count, c
if (aa) {
for (int i = 0; i < count; ++i) {
if (aa[i]) {
const Sk4f d4 = SkHalfToFloat_01(dst[i]);
dst[i] = SkFloatToHalf_01(d4 * Sk4f((255 - aa[i]) * 1.0f/255));
const Sk4f d4 = SkHalfToFloat_finite(dst[i]);
dst[i] = SkFloatToHalf_finite(d4 * Sk4f((255 - aa[i]) * 1.0f/255));
}
}
} else {
@@ -82,11 +82,11 @@ static void src_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int coun
const Sk4f s4 = Sk4f::Load(src->fVec);
if (aa) {
for (int i = 0; i < count; ++i) {
const Sk4f d4 = SkHalfToFloat_01(dst[i]);
dst[i] = SkFloatToHalf_01(lerp_by_coverage(s4, d4, aa[i]));
const Sk4f d4 = SkHalfToFloat_finite(dst[i]);
dst[i] = SkFloatToHalf_finite(lerp_by_coverage(s4, d4, aa[i]));
}
} else {
sk_memset64(dst, SkFloatToHalf_01(s4), count);
sk_memset64(dst, SkFloatToHalf_finite(s4), count);
}
}
@@ -95,13 +95,13 @@ static void src_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int cou
if (aa) {
for (int i = 0; i < count; ++i) {
const Sk4f s4 = Sk4f::Load(src[i].fVec);
const Sk4f d4 = SkHalfToFloat_01(dst[i]);
dst[i] = SkFloatToHalf_01(lerp_by_coverage(s4, d4, aa[i]));
const Sk4f d4 = SkHalfToFloat_finite(dst[i]);
dst[i] = SkFloatToHalf_finite(lerp_by_coverage(s4, d4, aa[i]));
}
} else {
for (int i = 0; i < count; ++i) {
const Sk4f s4 = Sk4f::Load(src[i].fVec);
dst[i] = SkFloatToHalf_01(s4);
dst[i] = SkFloatToHalf_finite(s4);
}
}
}
@@ -121,12 +121,12 @@ static void srcover_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int
const Sk4f s4 = Sk4f::Load(src->fVec);
const Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
for (int i = 0; i < count; ++i) {
const Sk4f d4 = SkHalfToFloat_01(dst[i]);
const Sk4f d4 = SkHalfToFloat_finite(dst[i]);
const Sk4f r4 = s4 + d4 * dst_scale;
if (aa) {
dst[i] = SkFloatToHalf_01(lerp_by_coverage(r4, d4, aa[i]));
dst[i] = SkFloatToHalf_finite(lerp_by_coverage(r4, d4, aa[i]));
} else {
dst[i] = SkFloatToHalf_01(r4);
dst[i] = SkFloatToHalf_finite(r4);
}
}
}
@@ -135,12 +135,12 @@ static void srcover_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int
const SkAlpha aa[]) {
for (int i = 0; i < count; ++i) {
Sk4f s = Sk4f::Load(src+i),
d = SkHalfToFloat_01(dst[i]),
d = SkHalfToFloat_finite(dst[i]),
r = s + d*(1.0f - SkNx_shuffle<3,3,3,3>(s));
if (aa) {
r = lerp_by_coverage(r, d, aa[i]);
}
dst[i] = SkFloatToHalf_01(r);
dst[i] = SkFloatToHalf_finite(r);
}
}

View File

@@ -143,11 +143,11 @@ struct DstTraits<DstType::F16, premul> {
}
static void store(const Sk4f& c, Type* dst) {
*dst = SkFloatToHalf_01(PM::apply(c));
*dst = SkFloatToHalf_finite(PM::apply(c));
}
static void store(const Sk4f& c, Type* dst, int n) {
sk_memset64(dst, SkFloatToHalf_01(PM::apply(c)), n);
sk_memset64(dst, SkFloatToHalf_finite(PM::apply(c)), n);
}
static void store4x(const Sk4f& c0, const Sk4f& c1,

View File

@@ -388,13 +388,28 @@ public:
SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }
SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); }
SkNx operator == (const SkNx& o) const {
return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
}
SkNx operator < (const SkNx& o) const {
return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
}
SkNx operator > (const SkNx& o) const {
return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
}
static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
// TODO as needed
SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
}
int32x4_t fVec;
};
@@ -456,6 +471,14 @@ template<> inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
return vqmovn_u16(vcombine_u16(_16, _16));
}
template<> inline Sk4i SkNx_cast<int, uint16_t>(const Sk4h& src) {
return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}
template<> inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}
static inline Sk4i Sk4f_round(const Sk4f& x) {
return vcvtq_s32_f32((x + 0.5f).fVec);
}

View File

@@ -152,16 +152,30 @@ public:
SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
int operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; int is[4]; } pun = {fVec};
return pun.is[k&3];
}
SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
#endif
}
__m128i fVec;
};
@@ -372,7 +386,21 @@ template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src)
return _mm_packus_epi16(src.fVec, src.fVec);
}
template<> inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
template<> /*static*/ inline Sk4i SkNx_cast<int, uint16_t>(const Sk4h& src) {
return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}
template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_packus_epi32(src.fVec, src.fVec);
#else
// Sign extend to trick _mm_packs_epi32() into doing the pack we want.
__m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
return _mm_packs_epi32(x,x);
#endif
}
template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}
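A small self-checking sketch of the SkNx_cast<uint16_t, int> trick above (hypothetical demo values, assuming SSE2 is available):
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
int main() {
    __m128i v     = _mm_set1_epi32(0x9000);   // 36864 > 0x7fff, so a naive signed-
    __m128i naive = _mm_packs_epi32(v, v);    // saturating pack clamps it to 0x7fff.
    __m128i x     = _mm_srai_epi32(_mm_slli_epi32(v, 16), 16);  // lanes -> 0xffff9000,
    __m128i ok    = _mm_packs_epi32(x, x);    // i.e. -28672, which packs to 0x9000.
    uint16_t a[8], b[8];
    _mm_storeu_si128((__m128i*)a, naive);
    _mm_storeu_si128((__m128i*)b, ok);
    printf("naive %04x, sign-extended %04x\n", a[0], b[0]);  // naive 7fff, trick 9000
    return 0;
}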

View File

@@ -61,26 +61,26 @@ static uint32_t u(float f) {
return x;
}
DEF_TEST(HalfToFloat_01, r) {
for (uint16_t h = 0; h < 0x8000; h++) {
DEF_TEST(HalfToFloat_finite, r) {
for (uint32_t h = 0; h <= 0xffff; h++) {
float f = SkHalfToFloat(h);
if (f >= 0 && f <= 1) {
float got = SkHalfToFloat_01(h)[0];
if (isfinite(f)) {
float got = SkHalfToFloat_finite(h)[0];
if (got != f) {
SkDebugf("0x%04x -> 0x%08x (%g), want 0x%08x (%g)\n",
h,
u(got), got,
u(f), f);
}
REPORTER_ASSERT(r, SkHalfToFloat_01(h)[0] == f);
REPORTER_ASSERT(r, SkFloatToHalf_01(SkHalfToFloat_01(h)) == h);
REPORTER_ASSERT(r, SkHalfToFloat_finite(h)[0] == f);
REPORTER_ASSERT(r, SkFloatToHalf_finite(SkHalfToFloat_finite(h)) == h);
}
}
}
DEF_TEST(FloatToHalf_01, r) {
DEF_TEST(FloatToHalf_finite, r) {
#if 0
for (uint32_t bits = 0; bits < 0x80000000; bits++) {
for (uint64_t bits = 0; bits <= 0xffffffff; bits++) {
#else
SkRandom rand;
for (int i = 0; i < 1000000; i++) {
@@ -88,14 +88,14 @@ DEF_TEST(FloatToHalf_01, r) {
#endif
float f;
memcpy(&f, &bits, 4);
if (f >= 0 && f <= 1) {
uint16_t h1 = (uint16_t)SkFloatToHalf_01(Sk4f(f,0,0,0)),
if (isfinite(f) && isfinite(SkHalfToFloat(SkFloatToHalf(f)))) {
uint16_t h1 = (uint16_t)SkFloatToHalf_finite(Sk4f(f,0,0,0)),
h2 = SkFloatToHalf(f);
bool ok = (h1 == h2 || h1 == h2-1);
REPORTER_ASSERT(r, ok);
if (!ok) {
SkDebugf("%08x (%d) -> %04x (%d), want %04x (%d)\n",
bits, bits>>23, h1, h1>>10, h2, h2>>10);
SkDebugf("%08x (%g) -> %04x, want %04x (%g)\n",
bits, f, h1, h2, SkHalfToFloat(h2));
break;
}
}

View File

@@ -288,3 +288,22 @@ DEF_TEST(SkNx_u16_float, r) {
REPORTER_ASSERT(r, !memcmp(s16, d16, sizeof(s16)));
}
}
// The SSE2 implementation of SkNx_cast<uint16_t>(Sk4i) is non-trivial, so worth a test.
DEF_TEST(SkNx_int_u16, r) {
// These are pretty hard to get wrong.
for (int i = 0; i <= 0x7fff; i++) {
uint16_t expected = (uint16_t)i;
uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];
REPORTER_ASSERT(r, expected == actual);
}
// A naive implementation with _mm_packs_epi32 would succeed up to 0x7fff but fail here:
for (int i = 0x8000; i <= 0xffff; i++) {
uint16_t expected = (uint16_t)i;
uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];
REPORTER_ASSERT(r, expected == actual);
}
}