Port SkBlurImage opts to SkOpts.

+268 -535 lines

I also rearranged the code a little bit so it encapsulates itself better,
mostly by replacing static helper functions with lambdas.  This also
let me merge the SSE2 and SSE4.1 code paths.

BUG=skia:4117

Review URL: https://codereview.chromium.org/1264103004
mtklein 2015-08-04 08:49:21 -07:00 committed by Commit bot
parent 562a66b093
commit dce5ce4276
19 changed files with 360 additions and 637 deletions
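For orientation before the per-file diffs: porting to SkOpts means the blur entry points become plain function pointers that are statically initialized to portable code and then overwritten by per-ISA Init_foo() hooks (built with the matching compiler flags) when the CPU qualifies. That is the shape visible in the SkOpts.h, SkOpts.cpp, and SkOpts_*.cpp hunks below. A condensed, self-contained sketch of the pattern; every name in it is an illustrative stand-in, not Skia's actual API:

// Sketch of the SkOpts-style runtime dispatch used by this CL (illustrative names only).
#include <cstdio>

namespace portable {                        // baseline every CPU can run
    int box_blur(int x) { return x / 2; }   // stand-in for box_blur<kX,kX> etc.
}
namespace sse41 {                           // would live in a -msse4.1 translation unit
    int box_blur(int x) { return x >> 1; }  // same contract, faster instructions
}

namespace SkOptsSketch {                    // mirrors namespace SkOpts
    // Function pointer, statically defaulted to the portable code (cf. SkOpts.cpp below).
    int (*box_blur)(int) = portable::box_blur;

    static bool cpu_has_sse41() { return true; }       // stand-in for the real CPUID check
    void Init_sse41() { box_blur = sse41::box_blur; }  // cf. SkOpts_sse41.cpp below
    void Init()       { if (cpu_has_sse41()) Init_sse41(); }
}

int main() {
    printf("%d\n", SkOptsSketch::box_blur(10));  // portable implementation
    SkOptsSketch::Init();
    printf("%d\n", SkOptsSketch::box_blur(10));  // same result; the SSE4.1-style override is now active
}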

View File

@@ -7,7 +7,6 @@
'<(skia_src_path)/opts/SkBitmapProcState_opts_none.cpp',
'<(skia_src_path)/opts/SkBlitMask_opts_none.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_none.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_none.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_none.cpp',
'<(skia_src_path)/opts/SkTextureCompression_opts_none.cpp',
],
@@ -16,7 +15,6 @@
'<(skia_src_path)/opts/SkBitmapProcState_opts_arm.cpp',
'<(skia_src_path)/opts/SkBlitMask_opts_arm.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_arm.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_arm.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_arm.cpp',
'<(skia_src_path)/opts/SkTextureCompression_opts_arm.cpp',
],
@@ -25,7 +23,6 @@
'<(skia_src_path)/opts/SkBitmapProcState_matrixProcs_neon.cpp',
'<(skia_src_path)/opts/SkBlitMask_opts_arm_neon.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_arm_neon.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_neon.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_neon.cpp',
'<(skia_src_path)/opts/SkTextureCompression_opts_neon.cpp',
'<(skia_src_path)/opts/SkOpts_neon.cpp',
@@ -38,8 +35,6 @@
'<(skia_src_path)/opts/SkBlitMask_opts_arm_neon.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_arm.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_arm_neon.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_arm.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_neon.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_arm.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_neon.cpp',
'<(skia_src_path)/opts/SkTextureCompression_opts_none.cpp',
@@ -50,7 +45,6 @@
'<(skia_src_path)/opts/SkBitmapProcState_opts_mips_dsp.cpp',
'<(skia_src_path)/opts/SkBlitMask_opts_none.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_mips_dsp.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_none.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_none.cpp',
'<(skia_src_path)/opts/SkTextureCompression_opts_none.cpp',
],
@@ -59,7 +53,6 @@
'<(skia_src_path)/opts/SkBitmapFilter_opts_SSE2.cpp',
'<(skia_src_path)/opts/SkBitmapProcState_opts_SSE2.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_SSE2.cpp',
'<(skia_src_path)/opts/SkBlurImage_opts_SSE2.cpp',
'<(skia_src_path)/opts/SkMorphology_opts_SSE2.cpp',
'<(skia_src_path)/opts/SkTextureCompression_opts_none.cpp',
'<(skia_src_path)/opts/opts_check_x86.cpp',
@@ -70,7 +63,6 @@
'<(skia_src_path)/opts/SkOpts_ssse3.cpp',
],
'sse41_sources': [
'<(skia_src_path)/opts/SkBlurImage_opts_SSE4.cpp',
'<(skia_src_path)/opts/SkBlitRow_opts_SSE4.cpp',
'<(skia_src_path)/opts/SkOpts_sse41.cpp',
],

View File

@@ -7,6 +7,8 @@
#include "SkOnce.h"
#include "SkOpts.h"
#define SK_OPTS_NS portable
#include "SkBlurImageFilter_opts.h"
#include "SkXfermode_opts.h"
#if defined(SK_CPU_X86)
@@ -47,6 +49,11 @@ namespace SkOpts {
decltype(memset32) memset32 = portable::memsetT<uint32_t>;
decltype(create_xfermode) create_xfermode = SkCreate4pxXfermode;
static const auto x = portable::kX, y = portable::kY;
decltype(box_blur_xx) box_blur_xx = portable::box_blur<x,x>;
decltype(box_blur_xy) box_blur_xy = portable::box_blur<x,y>;
decltype(box_blur_yx) box_blur_yx = portable::box_blur<y,x>;
// Each Init_foo() is defined in src/opts/SkOpts_foo.cpp.
void Init_sse2();
void Init_ssse3();

View File

@@ -30,6 +30,9 @@ namespace SkOpts {
// May return nullptr if we haven't specialized the given Mode.
extern SkXfermode* (*create_xfermode)(const ProcCoeff&, SkXfermode::Mode);
typedef void (*BoxBlur)(const SkPMColor*, int, SkPMColor*, int, int, int, int, int);
extern BoxBlur box_blur_xx, box_blur_xy, box_blur_yx;
}
#endif//SkOpts_DEFINED
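The new pointers keep the argument order of the SkBoxBlurProc typedef they replace (src, srcStride, dst, kernelSize, leftOffset, rightOffset, width, height; compare the removed SkBlurImage_opts.h further down). A hedged usage sketch, assuming an in-tree Skia build at this revision; the buffer contents and window values are illustrative only. Because the pointers default to the portable implementations in SkOpts.cpp above, the call is valid even before any CPU-specific Init_foo() has run.

#include "SkColorPriv.h"
#include "SkOpts.h"
#include <vector>

// Sketch: one horizontal box-blur pass over a small premultiplied buffer.
void one_pass_sketch() {
    const int w = 64, h = 32;
    std::vector<SkPMColor> src(w * h, SkPackARGB32(0xFF, 0x80, 0x80, 0x80));
    std::vector<SkPMColor> dst(w * h);

    // A 5-wide window reaching 2 pixels to either side of the current pixel.
    const int kernelSize = 5, leftOffset = 2, rightOffset = 2;

    // Same argument order as the call sites in SkBlurImageFilter::onFilterImage below:
    // (src, srcStride, dst, kernelSize, leftOffset, rightOffset, width, height).
    SkOpts::box_blur_xx(src.data(), w, dst.data(), kernelSize,
                        leftOffset, rightOffset, w, h);
}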

View File

@@ -8,10 +8,10 @@
#include "SkBitmap.h"
#include "SkBlurImageFilter.h"
#include "SkColorPriv.h"
#include "SkGpuBlurUtils.h"
#include "SkOpts.h"
#include "SkReadBuffer.h"
#include "SkWriteBuffer.h"
#include "SkGpuBlurUtils.h"
#include "SkBlurImage_opts.h"
#if SK_SUPPORT_GPU
#include "GrContext.h"
#endif
@@ -51,83 +51,6 @@ void SkBlurImageFilter::flatten(SkWriteBuffer& buffer) const {
buffer.writeScalar(fSigma.fHeight);
}
enum BlurDirection {
kX, kY
};
/**
*
* In order to make memory accesses cache-friendly, we reorder the passes to
* use contiguous memory reads wherever possible.
*
* For example, the 6 passes of the X-and-Y blur case are rewritten as
* follows. Instead of 3 passes in X and 3 passes in Y, we perform
* 2 passes in X, 1 pass in X transposed to Y on write, 2 passes in X,
* then 1 pass in X transposed to Y on write.
*
* +----+ +----+ +----+ +---+ +---+ +---+ +----+
* + AB + ----> | AB | ----> | AB | -----> | A | ----> | A | ----> | A | -----> | AB |
* +----+ blurX +----+ blurX +----+ blurXY | B | blurX | B | blurX | B | blurXY +----+
* +---+ +---+ +---+
*
* In this way, two of the y-blurs become x-blurs applied to transposed
* images, and all memory reads are contiguous.
*/
template<BlurDirection srcDirection, BlurDirection dstDirection>
static void boxBlur(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height)
{
int rightBorder = SkMin32(rightOffset + 1, width);
int srcStrideX = srcDirection == kX ? 1 : srcStride;
int dstStrideX = dstDirection == kX ? 1 : height;
int srcStrideY = srcDirection == kX ? srcStride : 1;
int dstStrideY = dstDirection == kX ? width : 1;
uint32_t scale = (1 << 24) / kernelSize;
uint32_t half = 1 << 23;
for (int y = 0; y < height; ++y) {
int sumA = 0, sumR = 0, sumG = 0, sumB = 0;
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sumA += SkGetPackedA32(*p);
sumR += SkGetPackedR32(*p);
sumG += SkGetPackedG32(*p);
sumB += SkGetPackedB32(*p);
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkColor* dptr = dst;
for (int x = 0; x < width; ++x) {
*dptr = SkPackARGB32((sumA * scale + half) >> 24,
(sumR * scale + half) >> 24,
(sumG * scale + half) >> 24,
(sumB * scale + half) >> 24);
if (x >= leftOffset) {
SkColor l = *(sptr - leftOffset * srcStrideX);
sumA -= SkGetPackedA32(l);
sumR -= SkGetPackedR32(l);
sumG -= SkGetPackedG32(l);
sumB -= SkGetPackedB32(l);
}
if (x + rightOffset + 1 < width) {
SkColor r = *(sptr + (rightOffset + 1) * srcStrideX);
sumA += SkGetPackedA32(r);
sumR += SkGetPackedR32(r);
sumG += SkGetPackedG32(r);
sumB += SkGetPackedB32(r);
}
sptr += srcStrideX;
if (srcDirection == kY) {
SK_PREFETCH(sptr + (rightOffset + 1) * srcStrideX);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
static void getBox3Params(SkScalar s, int *kernelSize, int* kernelSize3, int *lowOffset,
int *highOffset)
{
@@ -204,28 +127,40 @@ bool SkBlurImageFilter::onFilterImage(Proxy* proxy,
SkPMColor* d = dst->getAddr32(0, 0);
int w = dstBounds.width(), h = dstBounds.height();
int sw = src.rowBytesAsPixels();
SkBoxBlurProc boxBlurX, boxBlurXY, boxBlurYX;
if (!SkBoxBlurGetPlatformProcs(&boxBlurX, &boxBlurXY, &boxBlurYX)) {
boxBlurX = boxBlur<kX, kX>;
boxBlurXY = boxBlur<kX, kY>;
boxBlurYX = boxBlur<kY, kX>;
}
/**
*
* In order to make memory accesses cache-friendly, we reorder the passes to
* use contiguous memory reads wherever possible.
*
* For example, the 6 passes of the X-and-Y blur case are rewritten as
* follows. Instead of 3 passes in X and 3 passes in Y, we perform
* 2 passes in X, 1 pass in X transposed to Y on write, 2 passes in X,
* then 1 pass in X transposed to Y on write.
*
* +----+ +----+ +----+ +---+ +---+ +---+ +----+
* + AB + ----> | AB | ----> | AB | -----> | A | ----> | A | ----> | A | -----> | AB |
* +----+ blurX +----+ blurX +----+ blurXY | B | blurX | B | blurX | B | blurXY +----+
* +---+ +---+ +---+
*
* In this way, two of the y-blurs become x-blurs applied to transposed
* images, and all memory reads are contiguous.
*/
if (kernelSizeX > 0 && kernelSizeY > 0) {
boxBlurX(s, sw, t, kernelSizeX, lowOffsetX, highOffsetX, w, h);
boxBlurX(t, w, d, kernelSizeX, highOffsetX, lowOffsetX, w, h);
boxBlurXY(d, w, t, kernelSizeX3, highOffsetX, highOffsetX, w, h);
boxBlurX(t, h, d, kernelSizeY, lowOffsetY, highOffsetY, h, w);
boxBlurX(d, h, t, kernelSizeY, highOffsetY, lowOffsetY, h, w);
boxBlurXY(t, h, d, kernelSizeY3, highOffsetY, highOffsetY, h, w);
SkOpts::box_blur_xx(s, sw, t, kernelSizeX, lowOffsetX, highOffsetX, w, h);
SkOpts::box_blur_xx(t, w, d, kernelSizeX, highOffsetX, lowOffsetX, w, h);
SkOpts::box_blur_xy(d, w, t, kernelSizeX3, highOffsetX, highOffsetX, w, h);
SkOpts::box_blur_xx(t, h, d, kernelSizeY, lowOffsetY, highOffsetY, h, w);
SkOpts::box_blur_xx(d, h, t, kernelSizeY, highOffsetY, lowOffsetY, h, w);
SkOpts::box_blur_xy(t, h, d, kernelSizeY3, highOffsetY, highOffsetY, h, w);
} else if (kernelSizeX > 0) {
boxBlurX(s, sw, d, kernelSizeX, lowOffsetX, highOffsetX, w, h);
boxBlurX(d, w, t, kernelSizeX, highOffsetX, lowOffsetX, w, h);
boxBlurX(t, w, d, kernelSizeX3, highOffsetX, highOffsetX, w, h);
SkOpts::box_blur_xx(s, sw, d, kernelSizeX, lowOffsetX, highOffsetX, w, h);
SkOpts::box_blur_xx(d, w, t, kernelSizeX, highOffsetX, lowOffsetX, w, h);
SkOpts::box_blur_xx(t, w, d, kernelSizeX3, highOffsetX, highOffsetX, w, h);
} else if (kernelSizeY > 0) {
boxBlurYX(s, sw, d, kernelSizeY, lowOffsetY, highOffsetY, h, w);
boxBlurX(d, h, t, kernelSizeY, highOffsetY, lowOffsetY, h, w);
boxBlurXY(t, h, d, kernelSizeY3, highOffsetY, highOffsetY, h, w);
SkOpts::box_blur_yx(s, sw, d, kernelSizeY, lowOffsetY, highOffsetY, h, w);
SkOpts::box_blur_xx(d, h, t, kernelSizeY, highOffsetY, lowOffsetY, h, w);
SkOpts::box_blur_xy(t, h, d, kernelSizeY3, highOffsetY, highOffsetY, h, w);
}
return true;
}
@@ -260,7 +195,7 @@ bool SkBlurImageFilter::filterImageGPU(Proxy* proxy, const SkBitmap& src, const
#if SK_SUPPORT_GPU
SkBitmap input = src;
SkIPoint srcOffset = SkIPoint::Make(0, 0);
if (this->getInput(0) &&
if (this->getInput(0) &&
!this->getInput(0)->getInputResultGPU(proxy, src, ctx, &input, &srcOffset)) {
return false;
}
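The reordering comment moved into onFilterImage() above rests on one identity: a Y (column) box blur of a row-major image equals an X (row) box blur of its transpose, transposed back; the <kX,kY> and <kY,kX> template parameters get the same effect by swapping strides on write or read. A small standalone check of that identity, independent of Skia and of the exact offset and rounding details (radius-1 window, single channel):

// Standalone check: column blur == row blur of the transpose.
#include <cassert>
#include <cstdio>
#include <vector>

static std::vector<int> transpose(const std::vector<int>& img, int w, int h) {
    std::vector<int> out(img.size());
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            out[x*h + y] = img[y*w + x];        // result is h wide, w tall
    return out;
}

static std::vector<int> blur_rows(const std::vector<int>& img, int w, int h) {
    std::vector<int> out(img.size());
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
            int sum = 0, n = 0;
            for (int dx = -1; dx <= 1; dx++)
                if (unsigned(x + dx) < unsigned(w)) { sum += img[y*w + x + dx]; n++; }
            out[y*w + x] = sum / n;
        }
    return out;
}

int main() {
    const int w = 5, h = 4;
    std::vector<int> img(w*h);
    for (int i = 0; i < w*h; i++) img[i] = (i * 37) % 251;

    // Direct column (Y) blur.
    std::vector<int> colBlur(img.size());
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
            int sum = 0, n = 0;
            for (int dy = -1; dy <= 1; dy++)
                if (unsigned(y + dy) < unsigned(h)) { sum += img[(y+dy)*w + x]; n++; }
            colBlur[y*w + x] = sum / n;
        }

    // The same thing via transpose -> row blur -> transpose back.
    std::vector<int> viaT = transpose(blur_rows(transpose(img, w, h), h, w), h, w);
    assert(colBlur == viaT);
    printf("column blur matches row blur of the transpose\n");
}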

View File

@@ -0,0 +1,294 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkBlurImageFilter_opts_DEFINED
#define SkBlurImageFilter_opts_DEFINED
#include "SkColorPriv.h"
#include "SkTypes.h"
namespace SK_OPTS_NS {
enum Direction { kX, kY };
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
template<Direction srcDirection, Direction dstDirection>
void box_blur(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
// ARGB -> 000A 000R 000G 000B
auto expand = [](int p) {
return _mm_cvtepu8_epi32(_mm_cvtsi32_si128(p));
};
// Axxx Rxxx Gxxx Bxxx -> ARGB
auto repack = [](__m128i p) {
const char _ = ~0; // Don't care what ends up in these bytes. This zeros them.
p = _mm_shuffle_epi8(p, _mm_set_epi8(_,_,_,_, _,_,_,_, _,_,_,_, 15,11,7,3));
return _mm_cvtsi128_si32(p);
};
#else
// ARGB -> 000A 000R 000G 000B
auto expand = [](int p) {
auto result = _mm_cvtsi32_si128(p);
result = _mm_unpacklo_epi8(result, _mm_setzero_si128());
result = _mm_unpacklo_epi16(result, _mm_setzero_si128());
return result;
};
// Axxx Rxxx Gxxx Bxxx -> ARGB
auto repack = [](__m128i p) {
p = _mm_srli_epi32(p, 24); // 000A 000R 000G 000B
p = _mm_packs_epi32(p, p); // xxxx xxxx 0A0R 0G0B
p = _mm_packus_epi16(p, p); // xxxx xxxx xxxx ARGB
return _mm_cvtsi128_si32(p);
};
// _mm_mullo_epi32 is not available, so use the standard trick to emulate it.
auto _mm_mullo_epi32 = [](__m128i a, __m128i b) {
__m128i p02 = _mm_mul_epu32(a, b),
p13 = _mm_mul_epu32(_mm_srli_si128(a, 4),
_mm_srli_si128(b, 4));
return _mm_unpacklo_epi32(_mm_shuffle_epi32(p02, _MM_SHUFFLE(0,0,2,0)),
_mm_shuffle_epi32(p13, _MM_SHUFFLE(0,0,2,0)));
};
#endif
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const __m128i scale = _mm_set1_epi32((1 << 24) / kernelSize);
const __m128i half = _mm_set1_epi32(1 << 23);
for (int y = 0; y < height; ++y) {
__m128i sum = _mm_setzero_si128();
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sum = _mm_add_epi32(sum, expand(*p));
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkColor* dptr = dst;
for (int x = 0; x < width; ++x) {
// TODO(mtklein): We are working in 8.24 here. Drop to 8.8 when the kernel is narrow?
// Multiply each component by scale (divide by kernel size) and add half to round.
auto result = _mm_mullo_epi32(sum, scale);
result = _mm_add_epi32(result, half);
// Now pack the top byte of each 32-bit lane back down into one 32-bit color.
// Axxx Rxxx Gxxx Bxxx -> xxxx xxxx xxxx ARGB
*dptr = repack(result);
// TODO(mtklein): experiment with breaking this loop into 3 parts
if (x >= leftOffset) {
SkColor l = *(sptr - leftOffset * srcStrideX);
sum = _mm_sub_epi32(sum, expand(l));
}
if (x + rightOffset + 1 < width) {
SkColor r = *(sptr + (rightOffset + 1) * srcStrideX);
sum = _mm_add_epi32(sum, expand(r));
}
sptr += srcStrideX;
if (srcDirection == kY) {
// TODO(mtklein): experiment with moving this prefetch forward
_mm_prefetch(reinterpret_cast<const char*>(sptr + (rightOffset + 1) * srcStrideX),
_MM_HINT_T0);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
#elif defined(SK_ARM_HAS_NEON)
// Fast path for kernel sizes between 2 and 127, working on two rows at a time.
template<Direction srcDirection, Direction dstDirection>
void box_blur_double(const SkPMColor** src, int srcStride, SkPMColor** dst, int kernelSize,
int leftOffset, int rightOffset, int width, int* height) {
// Load 2 pixels from adjacent rows.
auto load_2_pixels = [&](const SkPMColor* s) {
if (srcDirection == kX) {
// 10% faster by adding these 2 prefetches
SK_PREFETCH(s + 16);
SK_PREFETCH(s + 16 + srcStride);
auto one = vld1_lane_u32(s + 0, vdup_n_u32(0), 0),
two = vld1_lane_u32(s + srcStride, one, 1);
return vreinterpret_u8_u32(two);
} else {
return vld1_u8((uint8_t*)s);
}
};
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : *height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const uint16x8_t scale = vdupq_n_u16((1 << 15) / kernelSize);
for (; *height >= 2; *height -= 2) {
uint16x8_t sum = vdupq_n_u16(0);
const SkPMColor* p = *src;
for (int i = 0; i < rightBorder; i++) {
sum = vaddw_u8(sum, load_2_pixels(p));
p += srcStrideX;
}
const SkPMColor* sptr = *src;
SkPMColor* dptr = *dst;
for (int x = 0; x < width; x++) {
// val = (sum * scale * 2 + 0x8000) >> 16
uint16x8_t resultPixels = vreinterpretq_u16_s16(vqrdmulhq_s16(
vreinterpretq_s16_u16(sum), vreinterpretq_s16_u16(scale)));
if (dstDirection == kX) {
uint32x2_t px2 = vreinterpret_u32_u8(vmovn_u16(resultPixels));
vst1_lane_u32(dptr + 0, px2, 0);
vst1_lane_u32(dptr + width, px2, 1);
} else {
vst1_u8((uint8_t*)dptr, vmovn_u16(resultPixels));
}
if (x >= leftOffset) {
sum = vsubw_u8(sum, load_2_pixels(sptr - leftOffset * srcStrideX));
}
if (x + rightOffset + 1 < width) {
sum = vaddw_u8(sum, load_2_pixels(sptr + (rightOffset + 1) * srcStrideX));
}
sptr += srcStrideX;
dptr += dstStrideX;
}
*src += srcStrideY * 2;
*dst += dstStrideY * 2;
}
}
template<Direction srcDirection, Direction dstDirection>
void box_blur(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height) {
// ARGB -> 0A0R 0G0B
auto expand = [](uint32_t p) {
return vget_low_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(p))));
};
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const uint32x4_t scale = vdupq_n_u32((1 << 24) / kernelSize);
const uint32x4_t half = vdupq_n_u32(1 << 23);
if (1 < kernelSize && kernelSize < 128) {
box_blur_double<srcDirection, dstDirection>(&src, srcStride, &dst, kernelSize,
leftOffset, rightOffset, width, &height);
}
for (; height > 0; height--) {
uint32x4_t sum = vdupq_n_u32(0);
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sum = vaddw_u16(sum, expand(*p));
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkPMColor* dptr = dst;
for (int x = 0; x < width; ++x) {
// ( half+sumA*scale half+sumR*scale half+sumG*scale half+sumB*scale )
uint32x4_t result = vmlaq_u32(half, sum, scale);
// Saturated conversion to 16-bit.
// ( AAAA RRRR GGGG BBBB ) -> ( 0A 0R 0G 0B )
uint16x4_t result16 = vqshrn_n_u32(result, 16);
// Saturated conversion to 8-bit.
// ( 0A 0R 0G 0B ) -> ( 0A 0R 0G 0B 0A 0R 0G 0B ) -> ( A R G B A R G B )
uint8x8_t result8 = vqshrn_n_u16(vcombine_u16(result16, result16), 8);
// ( A R G B A R G B ) -> ( ARGB ARGB ) -> ( ARGB )
// Store low 32 bits to destination.
vst1_lane_u32(dptr, vreinterpret_u32_u8(result8), 0);
if (x >= leftOffset) {
const SkPMColor* l = sptr - leftOffset * srcStrideX;
sum = vsubw_u16(sum, expand(*l));
}
if (x + rightOffset + 1 < width) {
const SkPMColor* r = sptr + (rightOffset + 1) * srcStrideX;
sum = vaddw_u16(sum, expand(*r));
}
sptr += srcStrideX;
if (srcDirection == kX) {
SK_PREFETCH(sptr + (rightOffset + 16) * srcStrideX);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
#else // Neither NEON nor >=SSE2.
template<Direction srcDirection, Direction dstDirection>
static void box_blur(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height) {
int rightBorder = SkMin32(rightOffset + 1, width);
int srcStrideX = srcDirection == kX ? 1 : srcStride;
int dstStrideX = dstDirection == kX ? 1 : height;
int srcStrideY = srcDirection == kX ? srcStride : 1;
int dstStrideY = dstDirection == kX ? width : 1;
uint32_t scale = (1 << 24) / kernelSize;
uint32_t half = 1 << 23;
for (int y = 0; y < height; ++y) {
int sumA = 0, sumR = 0, sumG = 0, sumB = 0;
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sumA += SkGetPackedA32(*p);
sumR += SkGetPackedR32(*p);
sumG += SkGetPackedG32(*p);
sumB += SkGetPackedB32(*p);
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkColor* dptr = dst;
for (int x = 0; x < width; ++x) {
*dptr = SkPackARGB32((sumA * scale + half) >> 24,
(sumR * scale + half) >> 24,
(sumG * scale + half) >> 24,
(sumB * scale + half) >> 24);
if (x >= leftOffset) {
SkColor l = *(sptr - leftOffset * srcStrideX);
sumA -= SkGetPackedA32(l);
sumR -= SkGetPackedR32(l);
sumG -= SkGetPackedG32(l);
sumB -= SkGetPackedB32(l);
}
if (x + rightOffset + 1 < width) {
SkColor r = *(sptr + (rightOffset + 1) * srcStrideX);
sumA += SkGetPackedA32(r);
sumR += SkGetPackedR32(r);
sumG += SkGetPackedG32(r);
sumB += SkGetPackedB32(r);
}
sptr += srcStrideX;
if (srcDirection == kY) {
SK_PREFETCH(sptr + (rightOffset + 1) * srcStrideX);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
#endif
} // namespace SK_OPTS_NS
#endif
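A quick standalone check of the fixed-point averaging both new paths rely on: the portable and SSE code work in 8.24 (scale = (1 << 24) / kernelSize, add half = 1 << 23, shift down by 24), while the NEON two-row path uses a 1.15 scale and lets vqrdmulhq_s16 do the doubling, rounding, and >>16 in one instruction; presumably the kernelSize < 128 bound on that path keeps per-channel sums within signed 16-bit range (255 * 127 < 32768). A minimal sketch of the arithmetic, plain C++ with no intrinsics:

// Plain-C++ check of the rounding used by box_blur() above.
#include <cstdint>
#include <cstdio>

int main() {
    const int kernelSize = 3;
    const uint32_t kSum = 251 + 252 + 253;            // one channel's sliding-window sum

    // 8.24 path (portable and SSE): multiply, add half, shift.
    const uint32_t scale = (1u << 24) / kernelSize;   // product stays below 2^32 for sums <= 255*kernelSize
    const uint32_t half  = 1u << 23;
    printf("8.24  average: %u\n", (kSum * scale + half) >> 24);                            // 252

    // 1.15 path (NEON two-row code): what vqrdmulhq_s16 computes per 16-bit lane.
    const int16_t scale16 = (1 << 15) / kernelSize;
    printf("1.15  average: %d\n", (2 * (int32_t)kSum * (int32_t)scale16 + 0x8000) >> 16);  // 252
    printf("exact average: %.2f\n", kSum / double(kernelSize));                            // 252.00
}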

View File

@@ -1,19 +0,0 @@
/*
* Copyright 2013 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkBlurImage_opts_DEFINED
#define SkBlurImage_opts_DEFINED
#include "SkColorPriv.h"
typedef void (*SkBoxBlurProc)(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height);
bool SkBoxBlurGetPlatformProcs(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX);
#endif

View File

@@ -1,106 +0,0 @@
/*
* Copyright 2013 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include <emmintrin.h>
#include "SkBitmap.h"
#include "SkBlurImage_opts_SSE2.h"
#include "SkColorPriv.h"
#include "SkRect.h"
namespace {
enum BlurDirection {
kX, kY
};
/* Helper function to spread the components of a 32-bit integer into the
* lower 8 bits of each 32-bit element of an SSE register.
*/
inline __m128i expand(int a) {
const __m128i zero = _mm_setzero_si128();
// 0 0 0 0 0 0 0 0 0 0 0 0 A R G B
__m128i result = _mm_cvtsi32_si128(a);
// 0 0 0 0 0 0 0 0 0 A 0 R 0 G 0 B
result = _mm_unpacklo_epi8(result, zero);
// 0 0 0 A 0 0 0 R 0 0 0 G 0 0 0 B
return _mm_unpacklo_epi16(result, zero);
}
template<BlurDirection srcDirection, BlurDirection dstDirection>
void SkBoxBlur_SSE2(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height)
{
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const __m128i scale = _mm_set1_epi32((1 << 24) / kernelSize);
const __m128i half = _mm_set1_epi32(1 << 23);
const __m128i zero = _mm_setzero_si128();
for (int y = 0; y < height; ++y) {
__m128i sum = zero;
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sum = _mm_add_epi32(sum, expand(*p));
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkColor* dptr = dst;
for (int x = 0; x < width; ++x) {
// SSE2 has no PMULLUD, so we must do AG and RB separately.
__m128i tmp1 = _mm_mul_epu32(sum, scale);
__m128i tmp2 = _mm_mul_epu32(_mm_srli_si128(sum, 4),
_mm_srli_si128(scale, 4));
__m128i result = _mm_unpacklo_epi32(_mm_shuffle_epi32(tmp1, _MM_SHUFFLE(0,0,2,0)),
_mm_shuffle_epi32(tmp2, _MM_SHUFFLE(0,0,2,0)));
// sumA*scale+.5 sumR*scale+.5 sumG*scale+.5 sumB*scale+.5
result = _mm_add_epi32(result, half);
// 0 0 0 A 0 0 0 R 0 0 0 G 0 0 0 B
result = _mm_srli_epi32(result, 24);
// 0 0 0 0 0 0 0 0 0 A 0 R 0 G 0 B
result = _mm_packs_epi32(result, zero);
// 0 0 0 0 0 0 0 0 0 0 0 0 A R G B
result = _mm_packus_epi16(result, zero);
*dptr = _mm_cvtsi128_si32(result);
if (x >= leftOffset) {
SkColor l = *(sptr - leftOffset * srcStrideX);
sum = _mm_sub_epi32(sum, expand(l));
}
if (x + rightOffset + 1 < width) {
SkColor r = *(sptr + (rightOffset + 1) * srcStrideX);
sum = _mm_add_epi32(sum, expand(r));
}
sptr += srcStrideX;
if (srcDirection == kY) {
_mm_prefetch(reinterpret_cast<const char*>(sptr + (rightOffset + 1) * srcStrideX),
_MM_HINT_T0);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
} // namespace
bool SkBoxBlurGetPlatformProcs_SSE2(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
*boxBlurX = SkBoxBlur_SSE2<kX, kX>;
*boxBlurXY = SkBoxBlur_SSE2<kX, kY>;
*boxBlurYX = SkBoxBlur_SSE2<kY, kX>;
return true;
}

View File

@@ -1,17 +0,0 @@
/*
* Copyright 2013 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkBlurImage_opts_SSE2_DEFINED
#define SkBlurImage_opts_SSE2_DEFINED
#include "SkBlurImage_opts.h"
bool SkBoxBlurGetPlatformProcs_SSE2(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX);
#endif

View File

@@ -1,115 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBitmap.h"
#include "SkBlurImage_opts_SSE4.h"
#include "SkColorPriv.h"
#include "SkRect.h"
/* With the exception of the compilers that don't support it, we always build the
* SSE4 functions and enable the caller to determine SSE4 support. However for
* compilers that do not support SSE4x we provide a stub implementation.
*/
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
#include <smmintrin.h>
namespace {
enum BlurDirection {
kX, kY
};
/* Helper function to spread the components of a 32-bit integer into the
* lower 8 bits of each 32-bit element of an SSE register.
*/
inline __m128i expand(int a) {
// ARGB -> 0000 0000 0000 ARGB
__m128i widened = _mm_cvtsi32_si128(a);
// SSE4.1 has xxxx xxxx xxxx ARGB -> 000A 000R 000G 000B as a one-stop-shop instruction.
// It can even work from memory, so a smart compiler probably merges in the _mm_cvtsi32_si128().
return _mm_cvtepu8_epi32(widened);
}
template<BlurDirection srcDirection, BlurDirection dstDirection>
void SkBoxBlur_SSE4(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height)
{
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const __m128i scale = _mm_set1_epi32((1 << 24) / kernelSize);
const __m128i half = _mm_set1_epi32(1 << 23);
for (int y = 0; y < height; ++y) {
__m128i sum = _mm_setzero_si128();
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sum = _mm_add_epi32(sum, expand(*p));
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkColor* dptr = dst;
for (int x = 0; x < width; ++x) {
// TODO(mtklein): We are working in 8.24 here. Drop to 8.8 when the kernel is narrow?
// Multiply each component by scale (i.e. divide by kernel size) and add half to round.
__m128i result = _mm_mullo_epi32(sum, scale);
result = _mm_add_epi32(result, half);
// Now pack the top byte of each 32-bit lane back down into one 32-bit color.
// Axxx Rxxx Gxxx Bxxx -> xxxx xxxx xxxx ARGB
const char _ = 0; // Don't care what ends up in these bytes. Happens to be byte 0.
result = _mm_shuffle_epi8(result, _mm_set_epi8(_,_,_,_, _,_,_,_, _,_,_,_, 15,11,7,3));
*dptr = _mm_cvtsi128_si32(result);
// TODO(mtklein): experiment with breaking this loop into 3 parts
if (x >= leftOffset) {
SkColor l = *(sptr - leftOffset * srcStrideX);
sum = _mm_sub_epi32(sum, expand(l));
}
if (x + rightOffset + 1 < width) {
SkColor r = *(sptr + (rightOffset + 1) * srcStrideX);
sum = _mm_add_epi32(sum, expand(r));
}
sptr += srcStrideX;
if (srcDirection == kY) {
// TODO(mtklein): experiment with moving this prefetch forward
_mm_prefetch(reinterpret_cast<const char*>(sptr + (rightOffset + 1) * srcStrideX),
_MM_HINT_T0);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
} // namespace
bool SkBoxBlurGetPlatformProcs_SSE4(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
*boxBlurX = SkBoxBlur_SSE4<kX, kX>;
*boxBlurXY = SkBoxBlur_SSE4<kX, kY>;
*boxBlurYX = SkBoxBlur_SSE4<kY, kX>;
return true;
}
#else // SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
bool SkBoxBlurGetPlatformProcs_SSE4(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
sk_throw();
return false;
}
#endif

View File

@@ -1,17 +0,0 @@
/*
* Copyright 2014 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef SkBlurImage_opts_SSE4_DEFINED
#define SkBlurImage_opts_SSE4_DEFINED
#include "SkBlurImage_opts.h"
bool SkBoxBlurGetPlatformProcs_SSE4(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX);
#endif

View File

@@ -1,24 +0,0 @@
/*
* Copyright 2014 ARM Ltd.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBlurImage_opts_neon.h"
#include "SkUtilsArm.h"
bool SkBoxBlurGetPlatformProcs(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
#if SK_ARM_NEON_IS_NONE
return false;
#else
#if SK_ARM_NEON_IS_DYNAMIC
if (!sk_cpu_arm_has_neon()) {
return false;
}
#endif
return SkBoxBlurGetPlatformProcs_NEON(boxBlurX, boxBlurXY, boxBlurYX);
#endif
}

View File

@@ -1,186 +0,0 @@
/*
* Copyright 2013 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBitmap.h"
#include "SkColorPriv.h"
#include "SkBlurImage_opts.h"
#include "SkRect.h"
#include <arm_neon.h>
namespace {
enum BlurDirection {
kX, kY
};
/**
* Helper function to load 2 pixels from different rows into an 8x8 NEON register
* and also pre-load pixels for future reads
*/
template<BlurDirection srcDirection>
inline uint8x8_t load_2_pixels(const SkPMColor* src, int srcStride) {
if (srcDirection == kX) {
uint32x2_t temp = vdup_n_u32(0);
// 10% faster by adding these 2 prefetches
SK_PREFETCH(src + 16);
SK_PREFETCH(src + srcStride + 16);
return vreinterpret_u8_u32(vld1_lane_u32(src + srcStride, vld1_lane_u32(src, temp, 0), 1));
} else {
return vld1_u8((uint8_t*)src);
}
}
/**
* Helper function to store the low 8-bits from a 16x8 NEON register to 2 rows
*/
template<BlurDirection dstDirection>
inline void store_2_pixels(uint16x8_t result16x8, SkPMColor* dst, int dstStride) {
if (dstDirection == kX) {
uint32x2_t temp = vreinterpret_u32_u8(vmovn_u16(result16x8));
vst1_lane_u32(dst, temp, 0);
vst1_lane_u32(dst + dstStride, temp, 1);
} else {
uint8x8_t temp = vmovn_u16(result16x8);
vst1_u8((uint8_t*)dst, temp);
}
}
/**
* fast path for kernel size less than 128
*/
template<BlurDirection srcDirection, BlurDirection dstDirection>
void SkDoubleRowBoxBlur_NEON(const SkPMColor** src, int srcStride, SkPMColor** dst, int kernelSize,
int leftOffset, int rightOffset, int width, int* height)
{
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : *height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const uint16x8_t scale = vdupq_n_u16((1 << 15) / kernelSize);
for (; *height >= 2; *height -= 2) {
uint16x8_t sum = vdupq_n_u16(0);
const SkPMColor* p = *src;
for (int i = 0; i < rightBorder; i++) {
sum = vaddw_u8(sum,
load_2_pixels<srcDirection>(p, srcStride));
p += srcStrideX;
}
const SkPMColor* sptr = *src;
SkPMColor* dptr = *dst;
for (int x = 0; x < width; x++) {
// val = (sum * scale * 2 + 0x8000) >> 16
uint16x8_t resultPixels = vreinterpretq_u16_s16(vqrdmulhq_s16(
vreinterpretq_s16_u16(sum), vreinterpretq_s16_u16(scale)));
store_2_pixels<dstDirection>(resultPixels, dptr, width);
if (x >= leftOffset) {
sum = vsubw_u8(sum,
load_2_pixels<srcDirection>(sptr - leftOffset * srcStrideX, srcStride));
}
if (x + rightOffset + 1 < width) {
sum = vaddw_u8(sum,
load_2_pixels<srcDirection>(sptr + (rightOffset + 1) * srcStrideX, srcStride));
}
sptr += srcStrideX;
dptr += dstStrideX;
}
*src += srcStrideY * 2;
*dst += dstStrideY * 2;
}
}
/**
* Helper function to spread the components of a 32-bit integer into the
* lower 8 bits of each 16-bit element of a NEON register.
*/
static inline uint16x4_t expand(uint32_t a) {
// ( ARGB ) -> ( ARGB ARGB ) -> ( A R G B A R G B )
uint8x8_t v8 = vreinterpret_u8_u32(vdup_n_u32(a));
// ( A R G B A R G B ) -> ( 0A 0R 0G 0B 0A 0R 0G 0B ) -> ( 0A 0R 0G 0B )
return vget_low_u16(vmovl_u8(v8));
}
template<BlurDirection srcDirection, BlurDirection dstDirection>
void SkBoxBlur_NEON(const SkPMColor* src, int srcStride, SkPMColor* dst, int kernelSize,
int leftOffset, int rightOffset, int width, int height)
{
const int rightBorder = SkMin32(rightOffset + 1, width);
const int srcStrideX = srcDirection == kX ? 1 : srcStride;
const int dstStrideX = dstDirection == kX ? 1 : height;
const int srcStrideY = srcDirection == kX ? srcStride : 1;
const int dstStrideY = dstDirection == kX ? width : 1;
const uint32x4_t scale = vdupq_n_u32((1 << 24) / kernelSize);
const uint32x4_t half = vdupq_n_u32(1 << 23);
if (1 < kernelSize && kernelSize < 128)
{
SkDoubleRowBoxBlur_NEON<srcDirection, dstDirection>(&src, srcStride, &dst, kernelSize,
leftOffset, rightOffset, width, &height);
}
for (; height > 0; height--) {
uint32x4_t sum = vdupq_n_u32(0);
const SkPMColor* p = src;
for (int i = 0; i < rightBorder; ++i) {
sum = vaddw_u16(sum, expand(*p));
p += srcStrideX;
}
const SkPMColor* sptr = src;
SkPMColor* dptr = dst;
for (int x = 0; x < width; ++x) {
// ( half+sumA*scale half+sumR*scale half+sumG*scale half+sumB*scale )
uint32x4_t result = vmlaq_u32(half, sum, scale);
// Saturated conversion to 16-bit.
// ( AAAA RRRR GGGG BBBB ) -> ( 0A 0R 0G 0B )
uint16x4_t result16 = vqshrn_n_u32(result, 16);
// Saturated conversion to 8-bit.
// ( 0A 0R 0G 0B ) -> ( 0A 0R 0G 0B 0A 0R 0G 0B ) -> ( A R G B A R G B )
uint8x8_t result8 = vqshrn_n_u16(vcombine_u16(result16, result16), 8);
// ( A R G B A R G B ) -> ( ARGB ARGB ) -> ( ARGB )
// Store low 32 bits to destination.
vst1_lane_u32(dptr, vreinterpret_u32_u8(result8), 0);
if (x >= leftOffset) {
const SkPMColor* l = sptr - leftOffset * srcStrideX;
sum = vsubw_u16(sum, expand(*l));
}
if (x + rightOffset + 1 < width) {
const SkPMColor* r = sptr + (rightOffset + 1) * srcStrideX;
sum = vaddw_u16(sum, expand(*r));
}
sptr += srcStrideX;
if (srcDirection == kX) {
SK_PREFETCH(sptr + (rightOffset + 16) * srcStrideX);
}
dptr += dstStrideX;
}
src += srcStrideY;
dst += dstStrideY;
}
}
} // namespace
bool SkBoxBlurGetPlatformProcs_NEON(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
*boxBlurX = SkBoxBlur_NEON<kX, kX>;
*boxBlurXY = SkBoxBlur_NEON<kX, kY>;
*boxBlurYX = SkBoxBlur_NEON<kY, kX>;
return true;
}

View File

@@ -1,12 +0,0 @@
/*
* Copyright 2013 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBlurImage_opts.h"
bool SkBoxBlurGetPlatformProcs_NEON(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX);

View File

@@ -1,14 +0,0 @@
/*
* Copyright 2013 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkBlurImage_opts.h"
bool SkBoxBlurGetPlatformProcs(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
return false;
}

View File

@@ -7,6 +7,8 @@
#include "SkFloatingPoint.h"
#include "SkOpts.h"
#define SK_OPTS_NS neon
#include "SkBlurImageFilter_opts.h"
#include "SkXfermode_opts.h"
namespace neon { // This helps identify methods from this file when debugging / profiling.
@@ -71,5 +73,10 @@ namespace SkOpts {
memset16 = neon::memset16;
memset32 = neon::memset32;
create_xfermode = SkCreate4pxXfermode;
static const auto x = neon::kX, y = neon::kY;
box_blur_xx = neon::box_blur<x,x>;
box_blur_xy = neon::box_blur<x,y>;
box_blur_yx = neon::box_blur<y,x>;
}
}

View File

@@ -6,6 +6,9 @@
*/
#include "SkOpts.h"
#define SK_OPTS_NS sse2
#include "SkBlurImageFilter_opts.h"
#include "SkXfermode_opts.h"
namespace sse2 { // This helps identify methods from this file when debugging / profiling.
@@ -53,5 +56,10 @@ namespace SkOpts {
memset16 = sse2::memset16;
memset32 = sse2::memset32;
create_xfermode = SkCreate4pxXfermode;
static const auto x = sse2::kX, y = sse2::kY;
box_blur_xx = sse2::box_blur<x,x>;
box_blur_xy = sse2::box_blur<x,y>;
box_blur_yx = sse2::box_blur<y,x>;
}
}

View File

@@ -6,9 +6,14 @@
*/
#include "SkOpts.h"
#define SK_OPTS_NS sse41
#include "SkBlurImageFilter_opts.h"
namespace SkOpts {
void Init_sse41() {
static const auto x = sse41::kX, y = sse41::kY;
box_blur_xx = sse41::box_blur<x,x>;
box_blur_xy = sse41::box_blur<x,y>;
box_blur_yx = sse41::box_blur<y,x>;
}
}

View File

@@ -12,9 +12,7 @@
#include "SkPMFloat.h"
#include "SkXfermode_proccoeff.h"
// This file is possibly included into multiple .cpp files.
// Each gets its own independent instantiation by wrapping in an anonymous namespace.
namespace {
namespace /* TODO: SK_OPTS_NS */ {
// Most xfermodes can be done most efficiently 4 pixels at a time in 8 or 16-bit fixed point.
#define XFERMODE(Name) static Sk4px SK_VECTORCALL Name(Sk4px s, Sk4px d)
@@ -316,6 +314,6 @@ static SkXfermode* SkCreate4pxXfermode(const ProcCoeff& rec, SkXfermode::Mode mo
return nullptr;
}
} // namespace
} // namespace SK_OPTS_NS
#endif//Sk4pxXfermode_DEFINED

View File

@@ -13,8 +13,6 @@
#include "SkBlitRow.h"
#include "SkBlitRow_opts_SSE2.h"
#include "SkBlitRow_opts_SSE4.h"
#include "SkBlurImage_opts_SSE2.h"
#include "SkBlurImage_opts_SSE4.h"
#include "SkLazyPtr.h"
#include "SkMorphology_opts.h"
#include "SkMorphology_opts_SSE2.h"
@@ -316,17 +314,3 @@ SkMorphologyImageFilter::Proc SkMorphologyGetPlatformProc(SkMorphologyProcType t
return NULL;
}
}
////////////////////////////////////////////////////////////////////////////////
bool SkBoxBlurGetPlatformProcs(SkBoxBlurProc* boxBlurX,
SkBoxBlurProc* boxBlurXY,
SkBoxBlurProc* boxBlurYX) {
if (supports_simd(SK_CPU_SSE_LEVEL_SSE41)) {
return SkBoxBlurGetPlatformProcs_SSE4(boxBlurX, boxBlurXY, boxBlurYX);
}
else if (supports_simd(SK_CPU_SSE_LEVEL_SSE2)) {
return SkBoxBlurGetPlatformProcs_SSE2(boxBlurX, boxBlurXY, boxBlurYX);
}
return false;
}