Rework SSE and NEON Color32 algorithms to be more correct and faster.

This change replaces the blend math, guarded by SK_SUPPORT_LEGACY_COLOR32_MATH.  The new math is more correct: it's never off by more than 1, and it's correct in all the interesting 0x00 and 0xFF edge cases, whereas the old math could be off by as much as 2 and was not always correct at those edges.

If you look at tests/BlendTest.cpp, the old code used the `blend_256_plus1_trunc` algorithm, while the new code uses `blend_256_round_alt`.  Neither uses `blend_perfect`, which is about 35% slower than `blend_256_round_alt`.
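
For reference, here is roughly what those blends look like per channel, written from the code in this CL rather than copied out of BlendTest.cpp (the helper names are mine; `color` is a channel of the already-premultiplied constant, `src` a channel of the pixel being blended over, and alpha 0x00/0xFF are assumed to be special-cased before these run, as the new code does):

    #include <stdint.h>

    // Old math (blend_256_plus1_trunc): scale by 256 - (a + 1), then truncate.
    static inline uint8_t blend_old(uint8_t src, uint8_t color, uint8_t a) {
        unsigned invA = 256 - (a + 1);            // == 256 - SkAlpha255To256(a)
        return color + ((src * invA) >> 8);
    }

    // New math (blend_256_round_alt): nudge invA and add 128 so the >>8 rounds.
    static inline uint8_t blend_new(uint8_t src, uint8_t color, uint8_t a) {
        unsigned invA = 255 - a;
        invA += invA >> 7;                        // approximate /255 with (invA + (invA >> 7)) / 256
        return color + ((src * invA + 128) >> 8);
    }

    // Exact reference the "off by 1" / "off by 2" claims are measured against.
    static inline uint8_t blend_exact(uint8_t src, uint8_t color, uint8_t a) {
        return color + (src * (255 - a) + 127) / 255;   // rounds to nearest
    }

A brute-force sweep of all (src, a) pairs with 0 < a < 255 against blend_exact is the kind of check that backs the numbers above.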

This will require an unfathomable number of rebaselines, first in Skia, then in Blink once I remove the guard.

I plan to follow up with some integer SIMD abstractions that can unify these two implementations into a single algorithm.  That is what I originally set out to do here, but the correctness gains seemed compelling enough to land on their own.  The only place the two implementations still differ significantly is the kernel function, and even there both can be expressed abstractly as:
  - multiply 8 bits by 8 bits, producing 16 bits;
  - add 16 bits to 16 bits, returning the top 8 bits.
All the constants are the same, except that SSE is a little faster keeping eight 16-bit inverse alphas while NEON is a little faster keeping eight 8-bit inverse alphas; I may need to give back that small speed win to unify the two.  A sketch of this shared kernel follows.
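
As a rough per-lane sketch of that shared kernel (my naming, not an abstraction that exists in this CL; the SSE path widens invA into 16-bit lanes while NEON keeps it as 8 bits for vmull_u8, but the arithmetic is identical):

    #include <stdint.h>

    // One lane of the shared kernel, assuming alpha is already known to be in (0x00, 0xFF):
    //   colorAndRound = (color << 8) + 128
    //   invA          = (255 - a) + ((255 - a) >> 7)
    static inline uint8_t kernel_lane(uint8_t src, uint16_t colorAndRound, uint8_t invA) {
        uint16_t prod = src * invA;            // multiply 8 bits by 8 bits, producing 16 bits
        uint16_t sum  = colorAndRound + prod;  // add 16 bits to 16 bits (premultiplied color keeps this in range)
        return sum >> 8;                       // return the top 8 bits
    }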

We should expect a ~25% speedup on Intel (mostly from unrolling to 8 pixels) and a ~20% speedup on ARM (mostly from using vaddhn to add `color`, round, and narrow back down to 8 bits, all in one instruction).
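
To spell out that vaddhn trick: vaddhn_u16 adds two uint16x8 vectors and keeps only the high byte of each lane, which is exactly the "add colorAndRound, round, shift down by 8, narrow to 8-bit" tail of the kernel.  A minimal sketch mirroring the Color32_arm_neon kernel in the diff below (`blend8` is just an illustrative name):

    #include <arm_neon.h>

    // Blend eight 8-bit channels (two RGBA pixels) against the constant color.
    // colorAndRound holds (color << 8) + 128 per 16-bit lane; invA8 holds the adjusted inverse alpha.
    static inline uint8x8_t blend8(uint8x8_t src8, uint16x8_t colorAndRound, uint8x8_t invA8) {
        uint16x8_t prod = vmull_u8(src8, invA8);   // widening multiply: 8x8 -> 16 bits per lane
        return vaddhn_u16(colorAndRound, prod);    // add, then keep the high byte of each lane
    }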

(I am probably missing several more related bugs here.)
BUG=skia:3738,skia:420,chromium:111470

Review URL: https://codereview.chromium.org/1092433002
mtklein 2015-04-17 11:00:54 -07:00 committed by Commit bot
parent 9d911d5a93
commit afe2ffb8ba
3 changed files with 137 additions and 157 deletions

@@ -140,27 +140,37 @@ SkBlitRow::Proc32 SkBlitRow::ColorProcFactory() {
     return proc;
 }
+#define SK_SUPPORT_LEGACY_COLOR32_MATHx
+// Color32 and its SIMD specializations use the blend_256_round_alt algorithm
+// from tests/BlendTest.cpp. It's not quite perfect, but it's never wrong in the
+// interesting edge cases, and it's quite a bit faster than blend_perfect.
+//
+// blend_256_round_alt is our currently blessed algorithm. Please use it or an analogous one.
 void SkBlitRow::Color32(SkPMColor* SK_RESTRICT dst,
                         const SkPMColor* SK_RESTRICT src,
                         int count, SkPMColor color) {
-    if (count > 0) {
-        if (0 == color) {
-            if (src != dst) {
-                memcpy(dst, src, count * sizeof(SkPMColor));
-            }
-            return;
-        }
-        unsigned colorA = SkGetPackedA32(color);
-        if (255 == colorA) {
-            sk_memset32(dst, color, count);
-        } else {
-            unsigned scale = 256 - SkAlpha255To256(colorA);
-            do {
-                *dst = color + SkAlphaMulQ(*src, scale);
-                src += 1;
-                dst += 1;
-            } while (--count);
-        }
-    }
+    switch (SkGetPackedA32(color)) {
+        case   0: memmove(dst, src, count * sizeof(SkPMColor)); return;
+        case 255: sk_memset32(dst, color, count);               return;
+    }
+    unsigned invA = 255 - SkGetPackedA32(color);
+#ifdef SK_SUPPORT_LEGACY_COLOR32_MATH  // blend_256_plus1_trunc, busted
+    unsigned round = 0;
+#else  // blend_256_round_alt, good
+    invA += invA >> 7;
+    unsigned round = (128 << 16) + (128 << 0);
+#endif
+    while (count --> 0) {
+        // Our math is 16-bit, so we can do a little bit of SIMD in 32-bit registers.
+        const uint32_t mask = 0x00FF00FF;
+        uint32_t rb = (((*src >> 0) & mask) * invA + round) >> 8,  // _r_b
+                 ag = (((*src >> 8) & mask) * invA + round) >> 0;  // a_g_
+        *dst = color + ((rb & mask) | (ag & ~mask));
+        src++;
+        dst++;
+    }
 }

@@ -232,60 +232,68 @@ void S32A_Blend_BlitRow32_SSE2(SkPMColor* SK_RESTRICT dst,
     }
 }
+#define SK_SUPPORT_LEGACY_COLOR32_MATHx
-/* SSE2 version of Color32()
- * portable version is in core/SkBlitRow_D32.cpp
- */
-void Color32_SSE2(SkPMColor dst[], const SkPMColor src[], int count,
-                  SkPMColor color) {
-    if (count <= 0) {
-        return;
-    }
+// Color32 and its SIMD specializations use the blend_256_round_alt algorithm
+// from tests/BlendTest.cpp. It's not quite perfect, but it's never wrong in the
+// interesting edge cases, and it's quite a bit faster than blend_perfect.
+//
+// blend_256_round_alt is our currently blessed algorithm. Please use it or an analogous one.
+void Color32_SSE2(SkPMColor dst[], const SkPMColor src[], int count, SkPMColor color) {
+    switch (SkGetPackedA32(color)) {
+        case   0: memmove(dst, src, count * sizeof(SkPMColor)); return;
+        case 255: sk_memset32(dst, color, count);               return;
+    }
-    if (0 == color) {
-        if (src != dst) {
-            memcpy(dst, src, count * sizeof(SkPMColor));
-        }
-        return;
-    }
+    __m128i colorHigh = _mm_unpacklo_epi8(_mm_setzero_si128(), _mm_set1_epi32(color));
+#ifdef SK_SUPPORT_LEGACY_COLOR32_MATH  // blend_256_plus1_trunc, busted
+    __m128i colorAndRound = colorHigh;
+#else  // blend_256_round_alt, good
+    __m128i colorAndRound = _mm_add_epi16(colorHigh, _mm_set1_epi16(128));
+#endif
+    unsigned invA = 255 - SkGetPackedA32(color);
+#ifdef SK_SUPPORT_LEGACY_COLOR32_MATH  // blend_256_plus1_trunc, busted
+    __m128i invA16 = _mm_set1_epi16(invA);
+#else  // blend_256_round_alt, good
+    SkASSERT(invA + (invA >> 7) < 256);  // We should still fit in the low byte here.
+    __m128i invA16 = _mm_set1_epi16(invA + (invA >> 7));
+#endif
+    // Does the core work of blending color onto 4 pixels, returning the resulting 4 pixels.
+    auto kernel = [&](const __m128i& src4) -> __m128i {
+        __m128i lo = _mm_mullo_epi16(invA16, _mm_unpacklo_epi8(src4, _mm_setzero_si128())),
+                hi = _mm_mullo_epi16(invA16, _mm_unpackhi_epi8(src4, _mm_setzero_si128()));
+        return _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(colorAndRound, lo), 8),
+                                _mm_srli_epi16(_mm_add_epi16(colorAndRound, hi), 8));
+    };
+    while (count >= 8) {
+        __m128i dst0 = kernel(_mm_loadu_si128((const __m128i*)(src+0))),
+                dst4 = kernel(_mm_loadu_si128((const __m128i*)(src+4)));
+        _mm_storeu_si128((__m128i*)(dst+0), dst0);
+        _mm_storeu_si128((__m128i*)(dst+4), dst4);
+        src += 8;
+        dst += 8;
+        count -= 8;
+    }
-    unsigned colorA = SkGetPackedA32(color);
-    if (255 == colorA) {
-        sk_memset32(dst, color, count);
-    } else {
-        unsigned scale = 256 - SkAlpha255To256(colorA);
-        if (count >= 4) {
-            SkASSERT(((size_t)dst & 0x03) == 0);
-            while (((size_t)dst & 0x0F) != 0) {
-                *dst = color + SkAlphaMulQ(*src, scale);
-                src++;
-                dst++;
-                count--;
-            }
-            const __m128i *s = reinterpret_cast<const __m128i*>(src);
-            __m128i *d = reinterpret_cast<__m128i*>(dst);
-            __m128i color_wide = _mm_set1_epi32(color);
-            while (count >= 4) {
-                __m128i src_pixel = _mm_loadu_si128(s);
-                src_pixel = SkAlphaMulQ_SSE2(src_pixel, scale);
-                __m128i result = _mm_add_epi8(color_wide, src_pixel);
-                _mm_store_si128(d, result);
-                s++;
-                d++;
-                count -= 4;
-            }
-            src = reinterpret_cast<const SkPMColor*>(s);
-            dst = reinterpret_cast<SkPMColor*>(d);
-        }
-        while (count > 0) {
-            *dst = color + SkAlphaMulQ(*src, scale);
-            src += 1;
-            dst += 1;
-            count--;
-        }
-    }
+    if (count >= 4) {
+        _mm_storeu_si128((__m128i*)dst, kernel(_mm_loadu_si128((const __m128i*)src)));
+        src += 4;
+        dst += 4;
+        count -= 4;
+    }
+    if (count >= 2) {
+        _mm_storel_epi64((__m128i*)dst, kernel(_mm_loadl_epi64((const __m128i*)src)));
+        src += 2;
+        dst += 2;
+        count -= 2;
+    }
+    if (count >= 1) {
+        *dst = _mm_cvtsi128_si32(kernel(_mm_cvtsi32_si128(*src)));
+    }
 }

@@ -1679,104 +1679,66 @@ void S32_D565_Opaque_Dither_neon(uint16_t* SK_RESTRICT dst,
     }
 }
-void Color32_arm_neon(SkPMColor* dst, const SkPMColor* src, int count,
-                      SkPMColor color) {
-    if (count <= 0) {
-        return;
-    }
+#define SK_SUPPORT_LEGACY_COLOR32_MATHx
+// Color32 and its SIMD specializations use the blend_256_round_alt algorithm
+// from tests/BlendTest.cpp. It's not quite perfect, but it's never wrong in the
+// interesting edge cases, and it's quite a bit faster than blend_perfect.
+//
+// blend_256_round_alt is our currently blessed algorithm. Please use it or an analogous one.
+void Color32_arm_neon(SkPMColor* dst, const SkPMColor* src, int count, SkPMColor color) {
+    switch (SkGetPackedA32(color)) {
+        case   0: memmove(dst, src, count * sizeof(SkPMColor)); return;
+        case 255: sk_memset32(dst, color, count);               return;
+    }
-    if (0 == color) {
-        if (src != dst) {
-            memcpy(dst, src, count * sizeof(SkPMColor));
-        }
-        return;
-    }
-    unsigned colorA = SkGetPackedA32(color);
-    if (255 == colorA) {
-        sk_memset32(dst, color, count);
-        return;
-    }
-    unsigned scale = 256 - SkAlpha255To256(colorA);
-    if (count >= 8) {
-        uint32x4_t vcolor;
-        uint8x8_t vscale;
-        vcolor = vdupq_n_u32(color);
-        // scale numerical interval [0-255], so load as 8 bits
-        vscale = vdup_n_u8(scale);
-        do {
-            // load src color, 8 pixels, 4 64 bit registers
-            // (and increment src).
-            uint32x2x4_t vsrc;
-#if defined(SK_CPU_ARM32) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6)))
-            asm (
-                "vld1.32 %h[vsrc], [%[src]]!"
-                : [vsrc] "=w" (vsrc), [src] "+r" (src)
-                : :
-            );
-#else // 64bit targets and Clang
-            vsrc.val[0] = vld1_u32(src);
-            vsrc.val[1] = vld1_u32(src+2);
-            vsrc.val[2] = vld1_u32(src+4);
-            vsrc.val[3] = vld1_u32(src+6);
-            src += 8;
-#endif
+    uint16x8_t colorHigh = vshll_n_u8((uint8x8_t)vdup_n_u32(color), 8);
+#ifdef SK_SUPPORT_LEGACY_COLOR32_MATH  // blend_256_plus1_trunc, busted
+    uint16x8_t colorAndRound = colorHigh;
+#else  // blend_256_round_alt, good
+    uint16x8_t colorAndRound = vaddq_u16(colorHigh, vdupq_n_u16(128));
+#endif
-            // multiply long by scale, 64 bits at a time,
-            // destination into a 128 bit register.
-            uint16x8x4_t vtmp;
-            vtmp.val[0] = vmull_u8(vreinterpret_u8_u32(vsrc.val[0]), vscale);
-            vtmp.val[1] = vmull_u8(vreinterpret_u8_u32(vsrc.val[1]), vscale);
-            vtmp.val[2] = vmull_u8(vreinterpret_u8_u32(vsrc.val[2]), vscale);
-            vtmp.val[3] = vmull_u8(vreinterpret_u8_u32(vsrc.val[3]), vscale);
-            // shift the 128 bit registers, containing the 16
-            // bit scaled values back to 8 bits, narrowing the
-            // results to 64 bit registers.
-            uint8x16x2_t vres;
-            vres.val[0] = vcombine_u8(
-                vshrn_n_u16(vtmp.val[0], 8),
-                vshrn_n_u16(vtmp.val[1], 8));
-            vres.val[1] = vcombine_u8(
-                vshrn_n_u16(vtmp.val[2], 8),
-                vshrn_n_u16(vtmp.val[3], 8));
-            // adding back the color, using 128 bit registers.
-            uint32x4x2_t vdst;
-            vdst.val[0] = vreinterpretq_u32_u8(vres.val[0] +
-                                               vreinterpretq_u8_u32(vcolor));
-            vdst.val[1] = vreinterpretq_u32_u8(vres.val[1] +
-                                               vreinterpretq_u8_u32(vcolor));
-            // store back the 8 calculated pixels (2 128 bit
-            // registers), and increment dst.
-#if defined(SK_CPU_ARM32) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6)))
-            asm (
-                "vst1.32 %h[vdst], [%[dst]]!"
-                : [dst] "+r" (dst)
-                : [vdst] "w" (vdst)
-                : "memory"
-            );
-#else // 64bit targets and Clang
-            vst1q_u32(dst, vdst.val[0]);
-            vst1q_u32(dst+4, vdst.val[1]);
-            dst += 8;
-#endif
+    unsigned invA = 255 - SkGetPackedA32(color);
+#ifdef SK_SUPPORT_LEGACY_COLOR32_MATH  // blend_256_plus1_trunc, busted
+    uint8x8_t invA8 = vdup_n_u8(invA);
+#else  // blend_256_round_alt, good
+    SkASSERT(invA + (invA >> 7) < 256);  // This next part only works if alpha is not 0.
+    uint8x8_t invA8 = vdup_n_u8(invA + (invA >> 7));
+#endif
-            count -= 8;
-        } while (count >= 8);
-    }
+    // Does the core work of blending color onto 4 pixels, returning the resulting 4 pixels.
+    auto kernel = [&](const uint32x4_t& src4) -> uint32x4_t {
+        uint16x8_t lo = vmull_u8(vget_low_u8( (uint8x16_t)src4), invA8),
+                   hi = vmull_u8(vget_high_u8((uint8x16_t)src4), invA8);
+        return (uint32x4_t)
+            vcombine_u8(vaddhn_u16(colorAndRound, lo), vaddhn_u16(colorAndRound, hi));
+    };
+    while (count >= 8) {
+        uint32x4_t dst0 = kernel(vld1q_u32(src+0)),
+                   dst4 = kernel(vld1q_u32(src+4));
+        vst1q_u32(dst+0, dst0);
+        vst1q_u32(dst+4, dst4);
+        src += 8;
+        dst += 8;
+        count -= 8;
+    }
-    while (count > 0) {
-        *dst = color + SkAlphaMulQ(*src, scale);
-        src += 1;
-        dst += 1;
-        count--;
-    }
+    if (count >= 4) {
+        vst1q_u32(dst, kernel(vld1q_u32(src)));
+        src += 4;
+        dst += 4;
+        count -= 4;
+    }
+    if (count >= 2) {
+        uint32x2_t src2 = vld1_u32(src);
+        vst1_u32(dst, vget_low_u32(kernel(vcombine_u32(src2, src2))));
+        src += 2;
+        dst += 2;
+        count -= 2;
+    }
+    if (count >= 1) {
+        vst1q_lane_u32(dst, kernel(vdupq_n_u32(*src)), 0);
+    }
 }