Add aarch64 tail code.

Change-Id: I25f029604a04f5fc6c249a3817b0dd84379071be
Reviewed-on: https://skia-review.googlesource.com/18149
Commit-Queue: Mike Klein <mtklein@chromium.org>
Reviewed-by: Mike Klein <mtklein@chromium.org>
Herb Derby 2017-05-30 16:56:32 -04:00 committed by Skia Commit-Bot
parent 5edba45dca
commit 84dcac3292
5 changed files with 1135 additions and 705 deletions


@@ -122,7 +122,7 @@ static SkJumper_Engine choose_engine() {
return {
#define M(stage) ASM(stage, aarch64),
{ SK_RASTER_PIPELINE_STAGES(M) },
- 4, M(start_pipeline) M(just_return)
+ 1, M(start_pipeline) M(just_return)
#undef M
};
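
The numeric field ahead of M(start_pipeline) appears to be the engine's minimum supported stride: dropping it from 4 to 1 lets callers hand the aarch64 engine any pixel count, since the stages below now cover the 1-3 leftover pixels themselves. A rough sketch of the kind of driver loop this enables; run_pipeline, pipeline, and kStride are illustrative names, not Skia's exact API:

#include <stddef.h>

// Sketch only: full blocks run with tail == 0; the leftover pixels run
// through the same stages with a nonzero tail.
void run_pipeline(size_t x, size_t n) {
    constexpr size_t kStride = 4;        // pixels per aarch64 pass (assumed)
    auto pipeline = [](size_t px, size_t tail) {
        // call the compiled stage chain for pixels starting at px, honoring tail
        (void)px; (void)tail;
    };

    while (n >= kStride) {
        pipeline(x, /*tail=*/0);         // a full kStride pixels
        x += kStride;
        n -= kStride;
    }
    if (n) {
        pipeline(x, /*tail=*/n);         // only the first n pixels
    }
}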

File diff suppressed because it is too large.


@@ -76,7 +76,7 @@ struct LazyCtx {
// We're finally going to get to what a Stage function looks like!
// It's best to jump down to the #else case first, then to come back up here for AVX.
-#if defined(JUMPER) && (defined(__SSE2__) || defined(__arm__))
+#if defined(JUMPER) && (defined(__SSE2__) || defined(__arm__) || defined(__aarch64__))
// Process the tail on all x86 processors with SSE2 or better instructions.
// tail == 0 ~~> work on a full kStride pixels
// tail != 0 ~~> work on only the first tail pixels
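
With __aarch64__ added to the #if, NEON builds take the same tail-aware path that SSE2 builds already use: tail == 0 means a full kStride pixels, tail != 0 means only the first tail pixels may be touched. A minimal sketch of that contract for a single float channel; the helper name and the copy-through-a-stack-buffer approach are illustrative, not Skia's implementation:

#include <arm_neon.h>
#include <stddef.h>
#include <string.h>

// Sketch only: honor the tail contract above for kStride == 4 floats of one channel.
static inline float32x4_t load_f32_tail(const float* ptr, size_t tail) {
    if (tail) {                          // tail != 0: read just the first `tail` floats
        float tmp[4] = {0, 0, 0, 0};
        memcpy(tmp, ptr, tail * sizeof(float));
        return vld1q_f32(tmp);
    }
    return vld1q_f32(ptr);               // tail == 0: a full kStride pixels
}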


@@ -108,31 +108,63 @@
}
SI void load3(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b) {
- uint16x4x3_t rgb = vld3_u16(ptr);
+ uint16x4x3_t rgb;
+ if (__builtin_expect(tail,0)) {
+     if ( true ) { rgb = vld3_lane_u16(ptr + 0, rgb, 0); }
+     if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
+     if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
+ } else {
+     rgb = vld3_u16(ptr);
+ }
*r = rgb.val[0];
*g = rgb.val[1];
*b = rgb.val[2];
}
SI void load4(const uint16_t* ptr, size_t tail, U16* r, U16* g, U16* b, U16* a) {
- uint16x4x4_t rgba = vld4_u16(ptr);
+ uint16x4x4_t rgba;
+ if (__builtin_expect(tail,0)) {
+     if ( true ) { rgba = vld4_lane_u16(ptr + 0, rgba, 0); }
+     if (tail > 1) { rgba = vld4_lane_u16(ptr + 4, rgba, 1); }
+     if (tail > 2) { rgba = vld4_lane_u16(ptr + 8, rgba, 2); }
+ } else {
+     rgba = vld4_u16(ptr);
+ }
*r = rgba.val[0];
*g = rgba.val[1];
*b = rgba.val[2];
*a = rgba.val[3];
}
SI void store4(uint16_t* ptr, size_t tail, U16 r, U16 g, U16 b, U16 a) {
- vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
+ if (__builtin_expect(tail,0)) {
+     if ( true ) { vst4_lane_u16(ptr + 0, (uint16x4x4_t{{r,g,b,a}}), 0); }
+     if (tail > 1) { vst4_lane_u16(ptr + 4, (uint16x4x4_t{{r,g,b,a}}), 1); }
+     if (tail > 2) { vst4_lane_u16(ptr + 8, (uint16x4x4_t{{r,g,b,a}}), 2); }
+ } else {
+     vst4_u16(ptr, (uint16x4x4_t{{r,g,b,a}}));
+ }
}
SI void load4(const float* ptr, size_t tail, F* r, F* g, F* b, F* a) {
- float32x4x4_t rgba = vld4q_f32(ptr);
+ float32x4x4_t rgba;
+ if (__builtin_expect(tail,0)) {
+     if ( true ) { rgba = vld4q_lane_f32(ptr + 0, rgba, 0); }
+     if (tail > 1) { rgba = vld4q_lane_f32(ptr + 4, rgba, 1); }
+     if (tail > 2) { rgba = vld4q_lane_f32(ptr + 8, rgba, 2); }
+ } else {
+     rgba = vld4q_f32(ptr);
+ }
*r = rgba.val[0];
*g = rgba.val[1];
*b = rgba.val[2];
*a = rgba.val[3];
}
SI void store4(float* ptr, size_t tail, F r, F g, F b, F a) {
- vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
+ if (__builtin_expect(tail,0)) {
+     if ( true ) { vst4q_lane_f32(ptr + 0, (float32x4x4_t{{r,g,b,a}}), 0); }
+     if (tail > 1) { vst4q_lane_f32(ptr + 4, (float32x4x4_t{{r,g,b,a}}), 1); }
+     if (tail > 2) { vst4q_lane_f32(ptr + 8, (float32x4x4_t{{r,g,b,a}}), 2); }
+ } else {
+     vst4q_f32(ptr, (float32x4x4_t{{r,g,b,a}}));
+ }
}
#elif defined(__arm__)
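
The vld3_lane/vld4_lane/vst4_lane intrinsics used above move one interleaved pixel per call, which is what lets the tail branches stop after 1, 2, or 3 pixels; lanes that are not written are carried through from the vector passed in. A standalone sketch of the same load3 pattern that zero-initializes that vector first (the zero init is this sketch's choice for safety, not part of the committed code):

#include <arm_neon.h>
#include <stddef.h>

// Sketch only: deinterleave up to 4 pixels of three interleaved uint16 channels.
static inline void load3_tail(const uint16_t* ptr, size_t tail,
                              uint16x4_t* r, uint16x4_t* g, uint16x4_t* b) {
    uint16x4x3_t rgb = {{ vdup_n_u16(0), vdup_n_u16(0), vdup_n_u16(0) }};
    if (tail) {
        // each vld3_lane_u16 pulls one 3-channel pixel into lane i of all three vectors
        rgb = vld3_lane_u16(ptr + 0, rgb, 0);                    // tail >= 1 always holds here
        if (tail > 1) { rgb = vld3_lane_u16(ptr + 3, rgb, 1); }
        if (tail > 2) { rgb = vld3_lane_u16(ptr + 6, rgb, 2); }
    } else {
        rgb = vld3_u16(ptr);                                     // full 4-pixel deinterleaving load
    }
    *r = rgb.val[0];
    *g = rgb.val[1];
    *b = rgb.val[2];
}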


@@ -115,15 +115,18 @@ DEF_TEST(SkRasterPipeline_tail, r) {
float* src = &data[0][0];
float* dst = &buffer[0][0];
- for (unsigned i = 0; i < 4; i++) {
+ for (unsigned i = 1; i <= 4; i++) {
memset(buffer, 0xff, sizeof(buffer));
SkRasterPipeline_<256> p;
p.append(SkRasterPipeline::load_f32, &src);
p.append(SkRasterPipeline::store_f32, &dst);
p.run(0, i);
for (unsigned j = 0; j < i; j++) {
- REPORTER_ASSERT(r,
-                 !memcmp(&data[j][0], &buffer[j][0], sizeof(buffer[j])));
+ for (unsigned k = 0; k < 4; k++) {
+     if (buffer[j][k] != data[j][k]) {
+         ERRORF(r, "(%u, %u) - a: %g r: %g\n", j, k, data[j][k], buffer[j][k]);
+     }
+ }
}
for (int j = i; j < 4; j++) {
for (auto f : buffer[j]) {
@@ -144,7 +147,7 @@ DEF_TEST(SkRasterPipeline_tail, r) {
uint16_t* src = &data[0][0];
uint16_t* dst = &buffer[0][0];
- for (unsigned i = 0; i < 4; i++) {
+ for (unsigned i = 1; i <= 4; i++) {
memset(buffer, 0xff, sizeof(buffer));
SkRasterPipeline_<256> p;
p.append(SkRasterPipeline::load_f16, &src);
@@ -181,7 +184,7 @@ DEF_TEST(SkRasterPipeline_tail, r) {
uint16_t* src = &data[0][0];
float* dst = &buffer[0][0];
- for (unsigned i = 0; i < 4; i++) {
+ for (unsigned i = 1; i <= 4; i++) {
memset(buffer, 0xff, sizeof(buffer));
SkRasterPipeline_<256> p;
p.append(SkRasterPipeline::load_rgb_u16_be, &src);
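
The test changes above loop over tails of 1 through 4 pixels and report mismatches per element instead of a single memcmp. A self-contained sketch of that checking pattern; plain C++ with memcpy standing in for p.run(0, n), not the Skia test harness:

#include <cstdio>
#include <cstring>

int main() {
    float data[4][4], buffer[4][4];
    for (int j = 0; j < 4; j++)
        for (int k = 0; k < 4; k++)
            data[j][k] = float(j * 4 + k);

    for (unsigned n = 1; n <= 4; n++) {
        memset(buffer, 0xff, sizeof(buffer));            // 0xff bytes: untouched floats read back as NaN
        memcpy(buffer, data, n * sizeof(buffer[0]));     // stand-in for running the pipeline with tail n

        for (unsigned j = 0; j < n; j++)                 // first n pixels must match the source
            for (unsigned k = 0; k < 4; k++)
                if (buffer[j][k] != data[j][k])
                    printf("(%u, %u) - want: %g got: %g\n", j, k, data[j][k], buffer[j][k]);

        for (unsigned j = n; j < 4; j++)                 // the rest must still be the NaN sentinel
            for (float f : buffer[j])
                if (!(f != f))
                    printf("pixel %u past the tail was overwritten\n", j);
    }
    return 0;
}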