Add arm tail code.

CQ_INCLUDE_TRYBOTS=skia.primary:Test-Android-Clang-Nexus10-CPU-Exynos5250-arm-Release-Android

Change-Id: Ia0e9f32d0324e66c9d4812dbb156a2b858d49a13
Reviewed-on: https://skia-review.googlesource.com/18127
Commit-Queue: Herb Derby <herb@google.com>
Commit-Queue: Mike Klein <mtklein@chromium.org>
Reviewed-by: Mike Klein <mtklein@chromium.org>
Authored by Herb Derby on 2017-05-30 14:22:49 -04:00; committed by Skia Commit-Bot
parent 836e6c1f7d
commit f81c56f3c0
5 changed files with 1591 additions and 1285 deletions

@@ -131,7 +131,7 @@ static SkJumper_Engine choose_engine() {
         return {
         #define M(stage) ASM(stage, vfp4),
             { SK_RASTER_PIPELINE_STAGES(M) },
-            2, M(start_pipeline) M(just_return)
+            1, M(start_pipeline) M(just_return)
         #undef M
         };
     }
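
Note: the lone numeric field edited above reads like the smallest pixel count the vfp4 engine will accept, so dropping it from 2 to 1 advertises that the ARM engine can now finish a row of any width by itself. A minimal sketch of how such a field could gate dispatch; the Engine struct, the min_stride name, and the portable fallback are assumptions for illustration, not this file's actual declarations.

    #include <cstddef>

    // Hypothetical engine descriptor; min_stride is assumed to be the smallest
    // pixel count the engine can handle on its own.
    struct Engine {
        int min_stride;
        void (*start_pipeline)(std::size_t x, std::size_t n);
    };

    // Use the vectorized engine whenever it can cover the row; otherwise fall
    // back to a portable one. Once tail code exists, min_stride == 1 and the
    // vectorized path is always eligible.
    inline void run(const Engine& vectorized, const Engine& portable,
                    std::size_t x, std::size_t n) {
        const Engine& e = n >= static_cast<std::size_t>(vectorized.min_stride)
                              ? vectorized : portable;
        e.start_pipeline(x, n);
    }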

File diff suppressed because it is too large

@@ -76,7 +76,7 @@ struct LazyCtx {

 // We're finally going to get to what a Stage function looks like!
 // It's best to jump down to the #else case first, then to come back up here for AVX.
-#if defined(JUMPER) && defined(__SSE2__)
+#if defined(JUMPER) && (defined(__SSE2__) || defined(__arm__))
 // Process the tail on all x86 processors with SSE2 or better instructions.
 // tail == 0 ~~> work on a full kStride pixels
 // tail != 0 ~~> work on only the first tail pixels
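
Note: a minimal sketch of the tail convention those comments describe, under the assumption of a hypothetical driver; kStride, Stage, run_row, and the interleaved RGBA-float layout are illustrative, not the file's actual start_pipeline.

    #include <cstddef>

    constexpr std::size_t kStride = 2;   // the 32-bit ARM path works 2 pixels at a time

    // stage(src, dst, tail): tail == 0 -> process a full kStride pixels,
    //                        tail != 0 -> process only the first `tail` pixels.
    using Stage = void (*)(const float* src, float* dst, std::size_t tail);

    inline void run_row(Stage stage, const float* src, float* dst, std::size_t n) {
        std::size_t x = 0;
        while (n >= kStride) {                        // full strides
            stage(src + 4*x, dst + 4*x, /*tail=*/0);
            x += kStride;
            n -= kStride;
        }
        if (n > 0) {                                  // ragged right-hand edge
            stage(src + 4*x, dst + 4*x, /*tail=*/n);
        }
    }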

@@ -182,7 +182,13 @@
 SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
     uint16x4x3_t rgb;
     rgb = vld3_lane_u16(ptr + 0, rgb, 0);
-    rgb = vld3_lane_u16(ptr + 3, rgb, 1);
+    if (__builtin_expect(tail, 0)) {
+        vset_lane_u16(0, rgb.val[0], 1);
+        vset_lane_u16(0, rgb.val[1], 1);
+        vset_lane_u16(0, rgb.val[2], 1);
+    } else {
+        rgb = vld3_lane_u16(ptr + 3, rgb, 1);
+    }
     *r = unaligned_load<U16>(rgb.val+0);
     *g = unaligned_load<U16>(rgb.val+1);
     *b = unaligned_load<U16>(rgb.val+2);
@@ -190,7 +196,14 @@
 SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
     uint16x4x4_t rgba;
     rgba = vld4_lane_u16(ptr + 0, rgba, 0);
-    rgba = vld4_lane_u16(ptr + 4, rgba, 1);
+    if (__builtin_expect(tail, 0)) {
+        vset_lane_u16(0, rgba.val[0], 1);
+        vset_lane_u16(0, rgba.val[1], 1);
+        vset_lane_u16(0, rgba.val[2], 1);
+        vset_lane_u16(0, rgba.val[3], 1);
+    } else {
+        rgba = vld4_lane_u16(ptr + 4, rgba, 1);
+    }
     *r = unaligned_load<U16>(rgba.val+0);
     *g = unaligned_load<U16>(rgba.val+1);
     *b = unaligned_load<U16>(rgba.val+2);
@@ -204,18 +217,29 @@
         widen_cast<uint16x4_t>(a),
     }};
     vst4_lane_u16(ptr + 0, rgba, 0);
-    vst4_lane_u16(ptr + 4, rgba, 1);
+    if (__builtin_expect(tail == 0, true)) {
+        vst4_lane_u16(ptr + 4, rgba, 1);
+    }
 }

 SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
-    float32x2x4_t rgba = vld4_f32(ptr);
+    float32x2x4_t rgba;
+    if (__builtin_expect(tail, 0)) {
+        rgba = vld4_dup_f32(ptr);
+    } else {
+        rgba = vld4_f32(ptr);
+    }
     *r = rgba.val[0];
     *g = rgba.val[1];
     *b = rgba.val[2];
     *a = rgba.val[3];
 }

 SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
-    vst4_f32(ptr, (float32x2x4_t{{r,g,b,a}}));
+    if (__builtin_expect(tail, 0)) {
+        vst4_lane_f32(ptr, (float32x2x4_t{{r,g,b,a}}), 0);
+    } else {
+        vst4_f32(ptr, (float32x2x4_t{{r,g,b,a}}));
+    }
 }
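
Note: with 2-wide NEON vectors the only possible tail is a single pixel, so the tail paths above read and write only the first pixel (vld*_lane_u16 / vld4_dup_f32 on the load side, vst*_lane on the store side) and never touch memory past the end of the row. A standalone sketch of the same trick; the helper name and signature are assumptions for illustration.

    #include <arm_neon.h>
    #include <stddef.h>

    // Copy n (1 or 2) interleaved RGBA float pixels without reading or writing
    // past the first n pixels; mirrors the load4/store4 tail branches above.
    static void copy_rgba_f32(const float* src, float* dst, size_t n) {
        float32x2x4_t px;
        if (n == 1) {
            px = vld4_dup_f32(src);      // reads exactly one pixel (4 floats)
            vst4_lane_f32(dst, px, 0);   // writes exactly one pixel
        } else {
            px = vld4_f32(src);          // reads both pixels (8 floats)
            vst4_f32(dst, px);           // writes both pixels
        }
    }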

@@ -120,7 +120,7 @@ DEF_TEST(SkRasterPipeline_tail, r) {
         SkRasterPipeline_<256> p;
         p.append(SkRasterPipeline::load_f32, &src);
         p.append(SkRasterPipeline::store_f32, &dst);
-        p.run(0, i % 4);
+        p.run(0, i);
         for (unsigned j = 0; j < i; j++) {
             REPORTER_ASSERT(r,
                 !memcmp(&data[j][0], &buffer[j][0], sizeof(buffer[j])));
@@ -149,7 +149,7 @@ DEF_TEST(SkRasterPipeline_tail, r) {
         SkRasterPipeline_<256> p;
         p.append(SkRasterPipeline::load_f16, &src);
         p.append(SkRasterPipeline::store_f16, &dst);
-        p.run(0, i % 4);
+        p.run(0, i);
         for (unsigned j = 0; j < i; j++) {
             REPORTER_ASSERT(r,
                 !memcmp(&data[j][0], &buffer[j][0], sizeof(buffer[j])));
@@ -186,7 +186,7 @@ DEF_TEST(SkRasterPipeline_tail, r) {
         SkRasterPipeline_<256> p;
        p.append(SkRasterPipeline::load_rgb_u16_be, &src);
         p.append(SkRasterPipeline::store_f32, &dst);
-        p.run(0, i % 4);
+        p.run(0, i);
         for (unsigned j = 0; j < i; j++) {
             for (unsigned k = 0; k < 4; k++) {
                 if (buffer[j][k] != answer[j][k]) {
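
Note: the tests now pass i straight to p.run rather than i % 4. A reduced sketch of this style of tail test in plain C++, not the actual Skia test; copy_pixels stands in for the load/store pipeline, and the poison byte is an assumption.

    #include <cassert>
    #include <cstring>

    constexpr int kStride = 4;

    // Stand-in for the load_f32 + store_f32 pipeline: copy the first n pixels.
    static void copy_pixels(const float (*src)[4], float (*dst)[4], int n) {
        std::memcpy(dst, src, n * sizeof(src[0]));
    }

    static void test_tails() {
        const float data[kStride][4] = {{ 1, 2, 3, 4}, { 5, 6, 7, 8},
                                        { 9,10,11,12}, {13,14,15,16}};
        for (int i = 1; i <= kStride; i++) {
            float buffer[kStride][4];
            std::memset(buffer, 0xAB, sizeof(buffer));     // poison the destination
            copy_pixels(data, buffer, i);                  // run with tail length i
            for (int j = 0; j < i; j++) {                  // first i pixels match...
                assert(!std::memcmp(data[j], buffer[j], sizeof(buffer[j])));
            }
            for (int j = i; j < kStride; j++) {            // ...the rest stay poisoned
                unsigned char poison[sizeof(buffer[j])];
                std::memset(poison, 0xAB, sizeof(poison));
                assert(!std::memcmp(poison, buffer[j], sizeof(buffer[j])));
            }
        }
    }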