Add SSE4 optimization of S32A_Opaque_BlitRow
Adds an optimization of Skia's S32A_Opaque_BlitRow blitter using the SSE4.2 SIMD instruction set, special-casing source alpha that is zero or fully opaque. Performance increases by 10%-400% compared to the existing SSE2 optimization (measured on the Silvermont architecture). The gain is noticeable in ~25 different Skia bench subtests, especially in bitmap_8888_*, repeatTile_*, and morph_*:

bitmap_8888_A - 100% faster
bitmap_8888_A_source_transparent - 250% faster
bitmap_8888_A_source_opaque - 25% faster
bitmap_8888_A_scale_bicubic - 75% faster

Signed-off-by: Henrik Smiding <henrik.smiding@intel.com>

Committed: https://skia.googlesource.com/skia/+/e2527b147679b0c43019fae7d59cc3777d2d097e

R=reed@google.com, mtklein@google.com, tomhudson@google.com, djsollen@google.com, joakim.landberg@intel.com

Author: henrik.smiding@intel.com

Review URL: https://codereview.chromium.org/289473009
commit b5c281e1e0
parent baa860c297
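For context: S32A_Opaque_BlitRow blends a row of premultiplied 32-bit source pixels over the destination, dst' = src + dst * (256 - srcA) per channel. Below is a minimal scalar C++ sketch of that per-pixel logic, including the alpha-zero and alpha-opaque fast paths this patch vectorizes (illustrative only, not part of the patch; Skia's real helpers are SkGetPackedA32, SkAlpha255To256 and SkAlphaMulQ):

#include <cstdint>

// Scalar reference for the blit: result = src + ((dst * (256 - srcA)) >> 8)
// per channel, with early outs for the two statistically common cases.
static inline uint32_t blend_one(uint32_t src, uint32_t dst) {
    uint32_t alpha = src >> 24;                  // premultiplied source alpha
    if (alpha == 0x00) return dst;               // transparent: skip the pixel
    if (alpha == 0xFF) return src;               // opaque: plain copy
    uint32_t inv = 256 - alpha;                  // 16-bit inverse alpha scale
    uint32_t rb = (((dst & 0x00FF00FF) * inv) >> 8) & 0x00FF00FF; // red/blue
    uint32_t ag = (((dst >> 8) & 0x00FF00FF) * inv) & 0xFF00FF00; // alpha/green
    return src + (rb | ag);
}

void blit_row_reference(uint32_t* dst, const uint32_t* src, int count) {
    for (int i = 0; i < count; i++) {
        dst[i] = blend_one(src[i], dst[i]);
    }
}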
gyp/opts.gyp (38 lines changed)
@@ -46,6 +46,7 @@
      ],
      'dependencies': [
        'opts_ssse3',
        'opts_sse4',
      ],
      'sources': [
        '../src/opts/opts_check_x86.cpp',
@@ -194,10 +195,45 @@
        }],
      ],
    },
    # For the same lame reasons as what is done for skia_opts, we also have to
    # create another target specifically for SSE4 code as we would not want
    # to compile the SSE2 code with -msse4 which would potentially allow
    # gcc to generate SSE4 code.
    {
      'target_name': 'opts_sse4',
      'product_name': 'skia_opts_sse4',
      'type': 'static_library',
      'standalone_static_library': 1,
      'dependencies': [
        'core.gyp:*',
        'effects.gyp:*'
      ],
      'include_dirs': [
        '../src/core',
      ],
      'conditions': [
        [ 'skia_os in ["linux", "freebsd", "openbsd", "solaris", "nacl", "chromeos", "android", "mac"] \
            and not skia_android_framework', {
          'cflags': [
            '-msse4',
          ],
        }],
        [ 'skia_arch_width == 64 and skia_arch_type == "x86"', {
          'sources': [
            '../src/opts/SkBlitRow_opts_SSE4_x64_asm.S',
          ],
        }],
        [ 'skia_arch_width == 32 and skia_arch_type == "x86"', {
          'sources': [
            '../src/opts/SkBlitRow_opts_SSE4_asm.S',
          ],
        }],
      ],
    },
    # NEON code must be compiled with -mfpu=neon which also affects scalar
    # code. To support dynamic NEON code paths, we need to build all
    # NEON-specific sources in a separate static library. The situation
-   # is very similar to the SSSE3 one.
+   # is very similar to the SSSE3 and SSE4 one.
    {
      'target_name': 'opts_neon',
      'product_name': 'skia_opts_neon',
@@ -15,6 +15,7 @@
    [ 'skia_arch_type == "x86" and skia_os != "android"', {
      'component_libs': [
        'opts.gyp:opts_ssse3',
        'opts.gyp:opts_sse4',
      ],
    }],
    [ 'arm_neon == 1', {
src/opts/SkBlitRow_opts_SSE4.h (new file, 25 lines)
@@ -0,0 +1,25 @@
/*
 * Copyright 2013 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkBlitRow_opts_SSE4_DEFINED
#define SkBlitRow_opts_SSE4_DEFINED

#include "SkBlitRow.h"

/* Check if we are able to build assembly code, GCC/AT&T syntax.
 * Had problems with LLVM-GCC 4.2.
 */
#if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC))
extern "C" void S32A_Opaque_BlitRow32_SSE4_asm(SkPMColor* SK_RESTRICT dst,
                                               const SkPMColor* SK_RESTRICT src,
                                               int count, U8CPU alpha);

#define SK_ATT_ASM_SUPPORTED
#endif

#endif
src/opts/SkBlitRow_opts_SSE4_asm.S (new file, 509 lines)
@@ -0,0 +1,509 @@
/*
 * Copyright 2013 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC))

#define CFI_PUSH(REG) \
    .cfi_adjust_cfa_offset 4; \
    .cfi_rel_offset REG, 0

#define CFI_POP(REG) \
    .cfi_adjust_cfa_offset -4; \
    .cfi_restore REG

#define PUSH(REG) pushl REG; CFI_PUSH (REG)
#define POP(REG) popl REG; CFI_POP (REG)
#define RETURN POP(%edi); ret

#define EXTRACT_ALPHA(var1, var2) \
    movdqa %var1, %var2;         /* Clone source pixels to extract alpha */\
    psrlw $8, %var2;             /* Discard red and blue, leaving alpha and green */\
    pshufhw $0xF5, %var2, %var2; /* Repeat alpha for scaling (high) */\
    movdqa %xmm6, %xmm4; \
    pshuflw $0xF5, %var2, %var2; /* Repeat alpha for scaling (low) */\
    movdqa %xmm5, %xmm3; \
    psubw %var2, %xmm4           /* Finalize alpha calculations */

#define SCALE_PIXELS \
    psllw $8, %xmm5;             /* Filter out red and blue components */\
    pmulhuw %xmm4, %xmm5;        /* Scale red and blue */\
    psrlw $8, %xmm3;             /* Filter out alpha and green components */\
    pmullw %xmm4, %xmm3          /* Scale alpha and green */


/*
 * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
 *                                 const SkPMColor* SK_RESTRICT src,
 *                                 int count, U8CPU alpha)
 *
 * This function is divided into six blocks: initialization, blit 4-15 pixels,
 * blit 0-3 pixels, align destination for 16+ pixel blits,
 * blit 16+ pixels with source unaligned, blit 16+ pixels with source aligned.
 * There is some code reuse between the blocks.
 *
 * The primary optimization comes from checking the source pixels' alpha value.
 * If the alpha is zero, the pixel can be skipped entirely.
 * If the alpha is fully opaque, the pixel can be copied directly to the destination.
 * According to collected statistics, these two cases are the most common.
 * The main loop(s) uses pre-loading and unrolling in an attempt to reduce the
 * memory latency worst-case.
 */

#ifdef __clang__
    .text
#else
    .section .text.sse4.2,"ax",@progbits
    .type S32A_Opaque_BlitRow32_SSE4_asm, @function
#endif
    .global S32A_Opaque_BlitRow32_SSE4_asm
    .global _S32A_Opaque_BlitRow32_SSE4_asm

.p2align 4
S32A_Opaque_BlitRow32_SSE4_asm:
_S32A_Opaque_BlitRow32_SSE4_asm:
    .cfi_startproc
    movl 8(%esp), %eax          // Source pointer
    movl 12(%esp), %ecx         // Pixel count
    movl 4(%esp), %edx          // Destination pointer
    prefetcht0 (%eax)

    // Setup SSE constants
    pcmpeqd %xmm7, %xmm7        // 0xFF000000 mask to check alpha
    pslld $24, %xmm7
    pcmpeqw %xmm6, %xmm6        // 16-bit 256 to calculate inv. alpha
    psrlw $15, %xmm6
    psllw $8, %xmm6
    pcmpeqw %xmm0, %xmm0        // 0x00FF00FF mask (Must be in xmm0 because of pblendvb)
    psrlw $8, %xmm0
    subl $4, %ecx               // Check if we have only 0-3 pixels
    js .LReallySmall
    PUSH(%edi)
    cmpl $11, %ecx              // Do we have enough pixels to run the main loop?
    ja .LBigBlit

    // Handle small blits (4-15 pixels)
    ////////////////////////////////////////////////////////////////////////////////
    xorl %edi, %edi             // Reset offset to zero

.LSmallLoop:
    lddqu (%eax, %edi), %xmm1   // Load four source pixels
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LSmallAlphaNotOpaqueOrZero
    jz .LSmallAlphaZero         // If all alphas are zero, skip the pixels completely
    movdqu %xmm1, (%edx, %edi)  // Store four destination pixels
.LSmallAlphaZero:
    addl $16, %edi
    subl $4, %ecx               // Check if there are four additional pixels, at least
    jns .LSmallLoop
    jmp .LSmallRemaining

    // Handle mixed alphas (calculate and scale)
.p2align 4
.LSmallAlphaNotOpaqueOrZero:
    lddqu (%edx, %edi), %xmm5   // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    addl $16, %edi
    subl $4, %ecx               // Check if we can store all four pixels
    pblendvb %xmm5, %xmm3       // Mask in %xmm0, implicitly
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqu %xmm1, -16(%edx, %edi) // Store four destination pixels
    jns .LSmallLoop

    // Handle the last 0-3 pixels (also used by the big unaligned loop)
.LSmallRemaining:
    cmpl $-4, %ecx              // Check if we are done
    je .LSmallExit
    sall $2, %ecx               // Calculate offset for last pixels
    addl %ecx, %edi

    lddqu (%eax, %edi), %xmm1   // Load last four source pixels (overlapping)
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    jc .LSmallRemainingStoreAll // If all alphas are opaque, just store (overlapping)
    jz .LSmallExit              // If all alphas are zero, skip the pixels completely

    // Handle mixed alphas (calculate and scale)
    lddqu (%edx, %edi), %xmm5   // Load last four destination pixels (overlapping)
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value

    psllw $8, %xmm3             // Filter out red and blue components
    pmulhuw %xmm4, %xmm3        // Scale red and blue
    movdqa %xmm5, %xmm2
    psrlw $8, %xmm2             // Filter out alpha and green components
    pmullw %xmm4, %xmm2         // Scale alpha and green

    cmpl $-8, %ecx              // Check how many pixels should be written
    pblendvb %xmm3, %xmm2       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm2, %xmm1          // Add source and destination pixels together
    jb .LSmallPixelsLeft1
    ja .LSmallPixelsLeft3       // To avoid double-blending the overlapping pixels...
    pblendw $0xF0, %xmm1, %xmm5 // Merge only the final two pixels to the destination
    movdqu %xmm5, (%edx, %edi)  // Store last two destination pixels
.LSmallExit:
    RETURN

.LSmallPixelsLeft1:
    pblendw $0xC0, %xmm1, %xmm5 // Merge only the final pixel to the destination
    movdqu %xmm5, (%edx, %edi)  // Store last destination pixel
    RETURN

.LSmallPixelsLeft3:
    pblendw $0xFC, %xmm1, %xmm5 // Merge only the final three pixels to the destination
    movdqu %xmm5, (%edx, %edi)  // Store last three destination pixels
    RETURN

.LSmallRemainingStoreAll:
    movdqu %xmm1, (%edx, %edi)  // Store last destination pixels (overwrite)
    RETURN

    // Handle really small blits (0-3 pixels)
    ////////////////////////////////////////////////////////////////////////////////
.LReallySmall:
    addl $4, %ecx
    jle .LReallySmallExit
    pcmpeqd %xmm1, %xmm1
    cmp $2, %ecx                // Check how many pixels should be read
    pinsrd $0x0, (%eax), %xmm1  // Load one source pixel
    pinsrd $0x0, (%edx), %xmm5  // Load one destination pixel
    jb .LReallySmallCalc
    pinsrd $0x1, 4(%eax), %xmm1 // Load second source pixel
    pinsrd $0x1, 4(%edx), %xmm5 // Load second destination pixel
    je .LReallySmallCalc
    pinsrd $0x2, 8(%eax), %xmm1 // Load third source pixel
    pinsrd $0x2, 8(%edx), %xmm5 // Load third destination pixel

.LReallySmallCalc:
    ptest %xmm7, %xmm1          // Check if all alphas are opaque
    jc .LReallySmallStore       // If all alphas are opaque, just store

    // Handle mixed alphas (calculate and scale)
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value

    pand %xmm0, %xmm5           // Filter out red and blue components
    pmullw %xmm4, %xmm5         // Scale red and blue
    psrlw $8, %xmm3             // Filter out alpha and green components
    pmullw %xmm4, %xmm3         // Scale alpha and green

    psrlw $8, %xmm5             // Combine results
    pblendvb %xmm5, %xmm3       // Mask in %xmm0, implicitly
    paddb %xmm3, %xmm1          // Add source and destination pixels together

.LReallySmallStore:
    cmp $2, %ecx                // Check how many pixels should be written
    pextrd $0x0, %xmm1, (%edx)  // Store one destination pixel
    jb .LReallySmallExit
    pextrd $0x1, %xmm1, 4(%edx) // Store second destination pixel
    je .LReallySmallExit
    pextrd $0x2, %xmm1, 8(%edx) // Store third destination pixel
.LReallySmallExit:
    ret

    // Handle bigger blit operations (16+ pixels)
    ////////////////////////////////////////////////////////////////////////////////
.p2align 4
.LBigBlit:
    // Align destination?
    testl $0xF, %edx
    lddqu (%eax), %xmm1         // Pre-load four source pixels
    jz .LAligned

    movl %edx, %edi             // Calculate alignment of destination pointer
    negl %edi
    andl $0xF, %edi

    // Handle 1-3 pixels to align destination
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    jz .LAlignDone              // If all alphas are zero, just skip
    lddqu (%edx), %xmm5         // Load four destination pixels
    jc .LAlignStore             // If all alphas are opaque, just store

    // Handle mixed alphas (calculate and scale)
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value

    psllw $8, %xmm3             // Filter out red and blue components
    pmulhuw %xmm4, %xmm3        // Scale red and blue
    movdqa %xmm5, %xmm2
    psrlw $8, %xmm2             // Filter out alpha and green components
    pmullw %xmm4, %xmm2         // Scale alpha and green

    pblendvb %xmm3, %xmm2       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm2, %xmm1          // Add source and destination pixels together

.LAlignStore:
    cmp $8, %edi                // Check how many pixels should be written
    jb .LAlignPixelsLeft1
    ja .LAlignPixelsLeft3
    pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels
    jmp .LAlignStorePixels

.LAlignPixelsLeft1:
    pblendw $0x03, %xmm1, %xmm5 // Blend one pixel
    jmp .LAlignStorePixels

.LAlignPixelsLeft3:
    pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels

.LAlignStorePixels:
    movdqu %xmm5, (%edx)        // Store destination pixels

.LAlignDone:
    addl %edi, %eax             // Adjust pointers and pixel count
    addl %edi, %edx
    shrl $2, %edi
    lddqu (%eax), %xmm1         // Pre-load new source pixels (after alignment)
    subl %edi, %ecx

.LAligned:                      // Destination is guaranteed to be 16 byte aligned
    xorl %edi, %edi             // Reset offset to zero
    subl $8, %ecx               // Decrease counter (Reserve four pixels for the cleanup)
    testl $0xF, %eax            // Check alignment of source pointer
    jz .LAlignedLoop

    // Source not aligned to destination
    ////////////////////////////////////////////////////////////////////////////////
.p2align 4
.LUnalignedLoop:                // Main loop for unaligned, handles eight pixels per iteration
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero00
    lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels
    jz .LAlphaZero00
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels

.LAlphaZero00:
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero01
    lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero01
    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels

.LAlphaZero01:
    addl $32, %edi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LUnalignedLoop
    addl $8, %ecx               // Adjust pixel count
    jmp .LLoopCleanup0

.p2align 4
.LAlphaNotOpaqueOrZero00:
    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    lddqu 16(%eax, %edi), %xmm2 // Pre-load four source pixels
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels

    // Handle next four pixels
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero01
    lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero02
    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
.LAlphaZero02:
    addl $32, %edi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LUnalignedLoop
    addl $8, %ecx               // Adjust pixel count
    jmp .LLoopCleanup0

.p2align 4
.LAlphaNotOpaqueOrZero01:
    movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels
    EXTRACT_ALPHA(xmm2, xmm1)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    lddqu 32(%eax, %edi), %xmm1 // Pre-load four source pixels
    addl $32, %edi
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm2          // Add source and destination pixels together
    subl $8, %ecx
    movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels
    jae .LUnalignedLoop
    addl $8, %ecx               // Adjust pixel count

    // Cleanup - handle pending pixels from loop
.LLoopCleanup0:
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero02
    jz .LAlphaZero03
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
.LAlphaZero03:
    addl $16, %edi
    subl $4, %ecx
    js .LSmallRemaining         // Reuse code from small loop
    lddqu (%eax, %edi), %xmm1   // Pre-load four source pixels
    jmp .LLoopCleanup0

.LAlphaNotOpaqueOrZero02:
    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    addl $16, %edi
    subl $4, %ecx
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, -16(%edx, %edi) // Store four destination pixels
    js .LSmallRemaining         // Reuse code from small loop
    lddqu (%eax, %edi), %xmm1   // Pre-load four source pixels
    jmp .LLoopCleanup0

    // Source aligned to destination
    ////////////////////////////////////////////////////////////////////////////////
.p2align 4
.LAlignedLoop:                  // Main loop for aligned, handles eight pixels per iteration
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero10
    movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels
    jz .LAlphaZero10
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels

.LAlphaZero10:
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero11
    movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero11
    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels

.LAlphaZero11:
    addl $32, %edi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LAlignedLoop
    jmp .LLoopCleanup1

.p2align 4
.LAlphaNotOpaqueOrZero10:
    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    movdqa 16(%eax, %edi), %xmm2 // Pre-load four source pixels
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels

    // Handle next four pixels
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero11
    movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero12
    movdqa %xmm2, 16(%edx, %edi) // Store four destination pixels
.LAlphaZero12:
    addl $32, %edi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LAlignedLoop
    jmp .LLoopCleanup1

.p2align 4
.LAlphaNotOpaqueOrZero11:
    movdqa 16(%edx, %edi), %xmm5 // Load four destination pixels
    EXTRACT_ALPHA(xmm2, xmm1)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha
    movdqa 32(%eax, %edi), %xmm1 // Pre-load four source pixels

    addl $32, %edi
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm2          // Add source and destination pixels together
    subl $8, %ecx
    movdqa %xmm2, -16(%edx, %edi) // Store four destination pixels
    jae .LAlignedLoop

    // Cleanup - handle four pending pixels from loop
.LLoopCleanup1:
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero12
    jz .LAlphaZero13
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
.LAlphaZero13:
    addl $8, %ecx               // Adjust offset and pixel count
    jz .LExit
    addl $16, %edi
    jmp .LRemainLoop1

.LAlphaNotOpaqueOrZero12:
    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    addl $8, %ecx               // Adjust offset and pixel count
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
    jz .LExit
    addl $16, %edi

    // Handle last 1-7 pixels
.LRemainLoop1:
    movdqa (%eax, %edi), %xmm1  // Load four source pixels
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LRemainAlphaNotOpaqueOrZero1
    jz .LRemainAlphaZero1

    // All alphas were opaque (copy)
    subl $4, %ecx               // Check if we have more than four pixels left
    jle .LRemainStore
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
    addl $16, %edi
    jmp .LRemainLoop1

    // All alphas were zero (skip)
.p2align 4
.LRemainAlphaZero1:
    subl $4, %ecx               // Check if we have more than four pixels left
    jle .LExit
    addl $16, %edi
    jmp .LRemainLoop1

    // Handle mixed alphas (calculate and scale)
.p2align 4
.LRemainAlphaNotOpaqueOrZero1:
    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    subl $4, %ecx
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    jle .LRemainStore
    movdqa %xmm1, (%edx, %edi)  // Store four destination pixels
    addl $16, %edi
    jmp .LRemainLoop1

    // Store the last 1-4 pixels
.p2align 4
.LRemainStore:
    jz .LRemainFull
    movdqa (%edx, %edi), %xmm5  // Load four destination pixels
    cmp $-2, %ecx               // Check how many pixels should be written
    jb .LRemainPixelsLeft11
    ja .LRemainPixelsLeft13
    pblendw $0x0F, %xmm1, %xmm5
    movdqa %xmm5, (%edx, %edi)  // Store last 2 destination pixels
.LExit:
    RETURN

.LRemainPixelsLeft11:
    pblendw $0x03, %xmm1, %xmm5
    movdqa %xmm5, (%edx, %edi)  // Store last destination pixel
    RETURN

.LRemainPixelsLeft13:
    pblendw $0x3F, %xmm1, %xmm5
    movdqa %xmm5, (%edx, %edi)  // Store last 3 destination pixels
    RETURN

.LRemainFull:
    movdqa %xmm1, (%edx, %edi)  // Store last 4 destination pixels
    RETURN

    .cfi_endproc
#ifndef __clang__
    .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm
#endif
#endif
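The heart of the file above is the ptest classification: tested against the 0xFF000000 mask, ZF means every source alpha in the quad is zero (skip) and CF means every alpha is 0xFF (plain copy); only mixed quads pay for the full scale-and-blend. A compiler-intrinsics sketch of one four-pixel step, equivalent in spirit to the hand-written loop bodies (illustrative only, not part of the patch; needs -msse4.1, uses unaligned loads/stores for simplicity):

#include <smmintrin.h>  // SSE4.1: _mm_testz_si128, _mm_testc_si128, _mm_blendv_epi8
#include <cstdint>

// One four-pixel step, mirroring the asm's ptest fast paths and the
// EXTRACT_ALPHA/SCALE_PIXELS math.
static void blit4_sse4_sketch(uint32_t* dst, const uint32_t* src) {
    const __m128i alphaMask = _mm_set1_epi32((int)0xFF000000); // xmm7 in the asm
    const __m128i inv256    = _mm_set1_epi16(256);             // xmm6 in the asm
    const __m128i rbMask    = _mm_set1_epi32(0x00FF00FF);      // xmm0 in the asm

    __m128i s = _mm_loadu_si128((const __m128i*)src);

    if (_mm_testz_si128(s, alphaMask)) {       // ZF: all four alphas zero
        return;                                // skip the quad entirely
    }
    if (_mm_testc_si128(s, alphaMask)) {       // CF: all four alphas 0xFF
        _mm_storeu_si128((__m128i*)dst, s);    // plain copy
        return;
    }

    // Mixed alphas: dst' = src + dst * (256 - srcA) per channel.
    __m128i d = _mm_loadu_si128((const __m128i*)dst);
    __m128i a = _mm_srli_epi16(s, 8);          // per 16-bit lane: alpha or green
    a = _mm_shufflehi_epi16(a, 0xF5);          // replicate alpha into every lane
    a = _mm_shufflelo_epi16(a, 0xF5);
    __m128i scale = _mm_sub_epi16(inv256, a);  // 256 - alpha

    __m128i rb = _mm_mulhi_epu16(_mm_slli_epi16(d, 8), scale); // (rb*scale)>>8, low bytes
    __m128i ag = _mm_mullo_epi16(_mm_srli_epi16(d, 8), scale); // ag*scale, >>8 sits in high bytes
    __m128i scaled = _mm_blendv_epi8(ag, rb, rbMask);          // low bytes from rb, high from ag
    _mm_storeu_si128((__m128i*)dst, _mm_add_epi8(s, scaled));
}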
src/opts/SkBlitRow_opts_SSE4_x64_asm.S (new file, 506 lines)
@@ -0,0 +1,506 @@
/*
 * Copyright 2013 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#if defined(__clang__) || (defined(__GNUC__) && !defined(SK_BUILD_FOR_MAC))

#define EXTRACT_ALPHA(var1, var2) \
    movdqa %var1, %var2;         /* Clone source pixels to extract alpha */\
    psrlw $8, %var2;             /* Discard red and blue, leaving alpha and green */\
    pshufhw $0xF5, %var2, %var2; /* Repeat alpha for scaling (high) */\
    movdqa %xmm6, %xmm4; \
    pshuflw $0xF5, %var2, %var2; /* Repeat alpha for scaling (low) */\
    movdqa %xmm5, %xmm3; \
    psubw %var2, %xmm4           /* Finalize alpha calculations */

#define SCALE_PIXELS \
    psllw $8, %xmm5;             /* Filter out red and blue components */\
    pmulhuw %xmm4, %xmm5;        /* Scale red and blue */\
    psrlw $8, %xmm3;             /* Filter out alpha and green components */\
    pmullw %xmm4, %xmm3          /* Scale alpha and green */


/*
 * void S32A_Opaque_BlitRow32_SSE4(SkPMColor* SK_RESTRICT dst,
 *                                 const SkPMColor* SK_RESTRICT src,
 *                                 int count, U8CPU alpha)
 *
 * This function is divided into six blocks: initialization, blit 4-15 pixels,
 * blit 0-3 pixels, align destination for 16+ pixel blits,
 * blit 16+ pixels with source unaligned, blit 16+ pixels with source aligned.
 * There is some code reuse between the blocks.
 *
 * The primary optimization comes from checking the source pixels' alpha value.
 * If the alpha is zero, the pixel can be skipped entirely.
 * If the alpha is fully opaque, the pixel can be copied directly to the destination.
 * According to collected statistics, these two cases are the most common.
 * The main loop(s) uses pre-loading and unrolling in an attempt to reduce the
 * memory latency worst-case.
 */

#ifdef __clang__
    .text
#else
    .section .text.sse4.2,"ax",@progbits
    .type S32A_Opaque_BlitRow32_SSE4_asm, @function
#endif
    .global S32A_Opaque_BlitRow32_SSE4_asm
    .global _S32A_Opaque_BlitRow32_SSE4_asm

.p2align 4
S32A_Opaque_BlitRow32_SSE4_asm:
_S32A_Opaque_BlitRow32_SSE4_asm:
    .cfi_startproc
    prefetcht0 (%rsi)
    movl %edx, %ecx             // Pixel count
    movq %rdi, %rdx             // Destination pointer
    movq %rsi, %rax             // Source pointer

    // Setup SSE constants
    movdqa .LAlphaCheckMask(%rip), %xmm7   // 0xFF000000 mask to check alpha
    movdqa .LInverseAlphaCalc(%rip), %xmm6 // 16-bit 256 to calculate inv. alpha
    movdqa .LResultMergeMask(%rip), %xmm0  // 0x00FF00FF mask (Must be in xmm0 because of pblendvb)

    subl $4, %ecx               // Check if we have only 0-3 pixels
    js .LReallySmall
    cmpl $11, %ecx              // Do we have enough pixels to run the main loop?
    ja .LBigBlit

    // Handle small blits (4-15 pixels)
    ////////////////////////////////////////////////////////////////////////////////
    xorq %rdi, %rdi             // Reset offset to zero

.LSmallLoop:
    lddqu (%rax, %rdi), %xmm1   // Load four source pixels
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LSmallAlphaNotOpaqueOrZero
    jz .LSmallAlphaZero
    movdqu %xmm1, (%rdx, %rdi)  // Store four destination pixels
.LSmallAlphaZero:
    addq $16, %rdi
    subl $4, %ecx               // Check if there are four additional pixels, at least
    jns .LSmallLoop
    jmp .LSmallRemaining

    // Handle mixed alphas (calculate and scale)
.p2align 4
.LSmallAlphaNotOpaqueOrZero:
    lddqu (%rdx, %rdi), %xmm5   // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    addq $16, %rdi
    subl $4, %ecx               // Check if we can store all four pixels
    pblendvb %xmm5, %xmm3       // Mask in %xmm0, implicitly
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqu %xmm1, -16(%rdx, %rdi) // Store four destination pixels
    jns .LSmallLoop

    // Handle the last 0-3 pixels (also used by the big unaligned loop)
.LSmallRemaining:
    cmpl $-4, %ecx              // Check if we are done
    je .LSmallExit
    sall $2, %ecx               // Calculate offset for last pixels
    movslq %ecx, %rcx
    addq %rcx, %rdi

    lddqu (%rax, %rdi), %xmm1   // Load last four source pixels (overlapping)
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    jc .LSmallRemainingStoreAll // If all alphas are opaque, just store (overlapping)
    jz .LSmallExit              // If all alphas are zero, skip the pixels completely

    // Handle mixed alphas (calculate and scale)
    lddqu (%rdx, %rdi), %xmm5   // Load last four destination pixels (overlapping)
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value

    psllw $8, %xmm3             // Filter out red and blue components
    pmulhuw %xmm4, %xmm3        // Scale red and blue
    movdqa %xmm5, %xmm2
    psrlw $8, %xmm2             // Filter out alpha and green components
    pmullw %xmm4, %xmm2         // Scale alpha and green

    cmpl $-8, %ecx              // Check how many pixels should be written
    pblendvb %xmm3, %xmm2       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm2, %xmm1          // Add source and destination pixels together
    jb .LSmallPixelsLeft1
    ja .LSmallPixelsLeft3       // To avoid double-blending the overlapping pixels...
    pblendw $0xF0, %xmm1, %xmm5 // Merge only the final two pixels to the destination
    movdqu %xmm5, (%rdx, %rdi)  // Store last two destination pixels
.LSmallExit:
    ret

.LSmallPixelsLeft1:
    pblendw $0xC0, %xmm1, %xmm5 // Merge only the final pixel to the destination
    movdqu %xmm5, (%rdx, %rdi)  // Store last destination pixel
    ret

.LSmallPixelsLeft3:
    pblendw $0xFC, %xmm1, %xmm5 // Merge only the final three pixels to the destination
    movdqu %xmm5, (%rdx, %rdi)  // Store last three destination pixels
    ret

.LSmallRemainingStoreAll:
    movdqu %xmm1, (%rdx, %rdi)  // Store last destination pixels (overwrite)
    ret

    // Handle really small blits (0-3 pixels)
    ////////////////////////////////////////////////////////////////////////////////
.LReallySmall:
    addl $4, %ecx
    jle .LReallySmallExit
    pcmpeqd %xmm1, %xmm1
    cmpl $2, %ecx               // Check how many pixels should be read
    pinsrd $0x0, (%rax), %xmm1  // Load one source pixel
    pinsrd $0x0, (%rdx), %xmm5  // Load one destination pixel
    jb .LReallySmallCalc
    pinsrd $0x1, 4(%rax), %xmm1 // Load second source pixel
    pinsrd $0x1, 4(%rdx), %xmm5 // Load second destination pixel
    je .LReallySmallCalc
    pinsrd $0x2, 8(%rax), %xmm1 // Load third source pixel
    pinsrd $0x2, 8(%rdx), %xmm5 // Load third destination pixel

.LReallySmallCalc:
    ptest %xmm7, %xmm1          // Check if all alphas are opaque
    jc .LReallySmallStore       // If all alphas are opaque, just store

    // Handle mixed alphas (calculate and scale)
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value

    pand %xmm0, %xmm5           // Filter out red and blue components
    pmullw %xmm4, %xmm5         // Scale red and blue
    psrlw $8, %xmm3             // Filter out alpha and green components
    pmullw %xmm4, %xmm3         // Scale alpha and green

    psrlw $8, %xmm5             // Combine results
    pblendvb %xmm5, %xmm3       // Mask in %xmm0, implicitly
    paddb %xmm3, %xmm1          // Add source and destination pixels together

.LReallySmallStore:
    cmpl $2, %ecx               // Check how many pixels should be written
    pextrd $0x0, %xmm1, (%rdx)  // Store one destination pixel
    jb .LReallySmallExit
    pextrd $0x1, %xmm1, 4(%rdx) // Store second destination pixel
    je .LReallySmallExit
    pextrd $0x2, %xmm1, 8(%rdx) // Store third destination pixel
.LReallySmallExit:
    ret

    // Handle bigger blit operations (16+ pixels)
    ////////////////////////////////////////////////////////////////////////////////
.p2align 4
.LBigBlit:
    // Align destination?
    testl $0xF, %edx
    lddqu (%rax), %xmm1         // Pre-load four source pixels
    jz .LAligned

    movq %rdx, %rdi             // Calculate alignment of destination pointer
    negq %rdi
    andl $0xF, %edi

    // Handle 1-3 pixels to align destination
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    jz .LAlignDone              // If all alphas are zero, just skip
    lddqu (%rdx), %xmm5         // Load four destination pixels
    jc .LAlignStore             // If all alphas are opaque, just store

    // Handle mixed alphas (calculate and scale)
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value

    psllw $8, %xmm3             // Filter out red and blue components
    pmulhuw %xmm4, %xmm3        // Scale red and blue
    movdqa %xmm5, %xmm2
    psrlw $8, %xmm2             // Filter out alpha and green components
    pmullw %xmm4, %xmm2         // Scale alpha and green

    pblendvb %xmm3, %xmm2       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm2, %xmm1          // Add source and destination pixels together

.LAlignStore:
    cmpl $8, %edi               // Check how many pixels should be written
    jb .LAlignPixelsLeft1
    ja .LAlignPixelsLeft3
    pblendw $0x0F, %xmm1, %xmm5 // Blend two pixels
    jmp .LAlignStorePixels

.LAlignPixelsLeft1:
    pblendw $0x03, %xmm1, %xmm5 // Blend one pixel
    jmp .LAlignStorePixels

.LAlignPixelsLeft3:
    pblendw $0x3F, %xmm1, %xmm5 // Blend three pixels

.LAlignStorePixels:
    movdqu %xmm5, (%rdx)        // Store destination pixels

.LAlignDone:
    addq %rdi, %rax             // Adjust pointers and pixel count
    addq %rdi, %rdx
    shrq $2, %rdi
    lddqu (%rax), %xmm1         // Pre-load new source pixels (after alignment)
    subl %edi, %ecx

.LAligned:                      // Destination is guaranteed to be 16 byte aligned
    xorq %rdi, %rdi             // Reset offset to zero
    subl $8, %ecx               // Decrease counter (Reserve four pixels for the cleanup)
    testl $0xF, %eax            // Check alignment of source pointer
    jz .LAlignedLoop

    // Source not aligned to destination
    ////////////////////////////////////////////////////////////////////////////////
.p2align 4
.LUnalignedLoop:                // Main loop for unaligned, handles eight pixels per iteration
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero00
    lddqu 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
    jz .LAlphaZero00
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels

.LAlphaZero00:
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero01
    lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero01
    movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels

.LAlphaZero01:
    addq $32, %rdi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LUnalignedLoop
    addl $8, %ecx               // Adjust pixel count
    jmp .LLoopCleanup0

.p2align 4
.LAlphaNotOpaqueOrZero00:
    movdqa (%rdx, %rdi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    lddqu 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels

    // Handle next four pixels
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero01
    lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero02
    movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
.LAlphaZero02:
    addq $32, %rdi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LUnalignedLoop
    addl $8, %ecx               // Adjust pixel count
    jmp .LLoopCleanup0

.p2align 4
.LAlphaNotOpaqueOrZero01:
    movdqa 16(%rdx, %rdi), %xmm5 // Load four destination pixels
    EXTRACT_ALPHA(xmm2, xmm1)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    lddqu 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
    addq $32, %rdi
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm2          // Add source and destination pixels together
    subl $8, %ecx
    movdqa %xmm2, -16(%rdx, %rdi) // Store four destination pixels
    jae .LUnalignedLoop
    addl $8, %ecx               // Adjust pixel count

    // Cleanup - handle pending pixels from loop
.LLoopCleanup0:
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero02
    jz .LAlphaZero03
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels
.LAlphaZero03:
    addq $16, %rdi
    subl $4, %ecx
    js .LSmallRemaining         // Reuse code from small loop
    lddqu (%rax, %rdi), %xmm1   // Pre-load four source pixels
    jmp .LLoopCleanup0

.LAlphaNotOpaqueOrZero02:
    movdqa (%rdx, %rdi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    addq $16, %rdi
    subl $4, %ecx
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, -16(%rdx, %rdi) // Store four destination pixels
    js .LSmallRemaining         // Reuse code from small loop
    lddqu (%rax, %rdi), %xmm1   // Pre-load four source pixels
    jmp .LLoopCleanup0

    // Source aligned to destination
    ////////////////////////////////////////////////////////////////////////////////
.p2align 4
.LAlignedLoop:                  // Main loop for aligned, handles eight pixels per iteration
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero10
    movdqa 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
    jz .LAlphaZero10
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels

.LAlphaZero10:
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero11
    movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero11
    movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels

.LAlphaZero11:
    addq $32, %rdi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LAlignedLoop
    jmp .LLoopCleanup1

.p2align 4
.LAlphaNotOpaqueOrZero10:
    movdqa (%rdx, %rdi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    movdqa 16(%rax, %rdi), %xmm2 // Pre-load four source pixels
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels

    // Handle next four pixels
    ptest %xmm7, %xmm2          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero11
    movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels
    jz .LAlphaZero12
    movdqa %xmm2, 16(%rdx, %rdi) // Store four destination pixels
.LAlphaZero12:
    addq $32, %rdi              // Adjust offset and pixel count
    subl $8, %ecx
    jae .LAlignedLoop
    jmp .LLoopCleanup1

.p2align 4
.LAlphaNotOpaqueOrZero11:
    movdqa 16(%rdx, %rdi), %xmm5 // Load four destination pixels
    EXTRACT_ALPHA(xmm2, xmm1)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha
    movdqa 32(%rax, %rdi), %xmm1 // Pre-load four source pixels

    addq $32, %rdi
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm2          // Add source and destination pixels together
    subl $8, %ecx
    movdqa %xmm2, -16(%rdx, %rdi) // Store four destination pixels
    jae .LAlignedLoop

    // Cleanup - handle four pending pixels from loop
.LLoopCleanup1:
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LAlphaNotOpaqueOrZero12
    jz .LAlphaZero13
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels
.LAlphaZero13:
    addl $8, %ecx               // Adjust offset and pixel count
    jz .LExit
    addq $16, %rdi
    jmp .LRemainLoop1

.LAlphaNotOpaqueOrZero12:
    movdqa (%rdx, %rdi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    addl $8, %ecx               // Adjust offset and pixel count
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels
    jz .LExit
    addq $16, %rdi

    // Handle last 1-7 pixels
.LRemainLoop1:
    movdqa (%rax, %rdi), %xmm1  // Load four source pixels
    ptest %xmm7, %xmm1          // Check if all alphas are zero or opaque
    ja .LRemainAlphaNotOpaqueOrZero1
    jz .LRemainAlphaZero1

    // All alphas were opaque (copy)
    subl $4, %ecx               // Check if we have more than four pixels left
    jle .LRemainStore
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels
    addq $16, %rdi
    jmp .LRemainLoop1

    // All alphas were zero (skip)
.p2align 4
.LRemainAlphaZero1:
    subl $4, %ecx               // Check if we have more than four pixels left
    jle .LExit
    addq $16, %rdi
    jmp .LRemainLoop1

    // Handle mixed alphas (calculate and scale)
.p2align 4
.LRemainAlphaNotOpaqueOrZero1:
    movdqa (%rdx, %rdi), %xmm5  // Load four destination pixels
    EXTRACT_ALPHA(xmm1, xmm2)   // Extract and clone alpha value
    SCALE_PIXELS                // Scale pixels using alpha

    subl $4, %ecx
    pblendvb %xmm5, %xmm3       // Combine results (mask in %xmm0, implicitly)
    paddb %xmm3, %xmm1          // Add source and destination pixels together
    jle .LRemainStore
    movdqa %xmm1, (%rdx, %rdi)  // Store four destination pixels
    addq $16, %rdi
    jmp .LRemainLoop1

    // Store the last 1-4 pixels
.p2align 4
.LRemainStore:
    jz .LRemainFull
    movdqa (%rdx, %rdi), %xmm5  // Load four destination pixels
    cmpl $-2, %ecx              // Check how many pixels should be written
    jb .LRemainPixelsLeft11
    ja .LRemainPixelsLeft13
    pblendw $0x0F, %xmm1, %xmm5
    movdqa %xmm5, (%rdx, %rdi)  // Store last 2 destination pixels
.LExit:
    ret

.LRemainPixelsLeft11:
    pblendw $0x03, %xmm1, %xmm5
    movdqa %xmm5, (%rdx, %rdi)  // Store last destination pixel
    ret

.LRemainPixelsLeft13:
    pblendw $0x3F, %xmm1, %xmm5
    movdqa %xmm5, (%rdx, %rdi)  // Store last 3 destination pixels
    ret

.LRemainFull:
    movdqa %xmm1, (%rdx, %rdi)  // Store last 4 destination pixels
    ret

    .cfi_endproc
#ifndef __clang__
    .size S32A_Opaque_BlitRow32_SSE4_asm, .-S32A_Opaque_BlitRow32_SSE4_asm
#endif

    // Constants for SSE code
#ifndef __clang__
    .section .rodata
#endif
    .p2align 4
.LAlphaCheckMask:
    .long 0xFF000000, 0xFF000000, 0xFF000000, 0xFF000000
.LInverseAlphaCalc:
    .word 256, 256, 256, 256, 256, 256, 256, 256
.LResultMergeMask:
    .long 0x00FF00FF, 0x00FF00FF, 0x00FF00FF, 0x00FF00FF
#endif
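One structural difference between the two files: the x64 version loads its three constants from .rodata above, while the 32-bit version materializes them in registers at entry (pcmpeqd/pslld and friends), avoiding data-section references. The same idiom in intrinsics form, for illustration only (not part of the patch; SSE2 suffices for these):

#include <emmintrin.h>  // SSE2

static inline __m128i alpha_check_mask() {   // 0xFF000000 per 32-bit lane
    __m128i ones = _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128());
    return _mm_slli_epi32(ones, 24);         // pcmpeqd ; pslld $24
}

static inline __m128i inverse_alpha_256() {  // 256 per 16-bit lane
    __m128i ones = _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128());
    return _mm_slli_epi16(_mm_srli_epi16(ones, 15), 8); // pcmpeqw ; psrlw $15 ; psllw $8
}

static inline __m128i result_merge_mask() {  // 0x00FF00FF per 32-bit lane
    __m128i ones = _mm_cmpeq_epi16(_mm_setzero_si128(), _mm_setzero_si128());
    return _mm_srli_epi16(ones, 8);          // pcmpeqw ; psrlw $8
}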
src/opts/opts_check_x86.cpp

@@ -12,6 +12,7 @@
#include "SkBlitRect_opts_SSE2.h"
#include "SkBlitRow.h"
#include "SkBlitRow_opts_SSE2.h"
#include "SkBlitRow_opts_SSE4.h"
#include "SkBlurImage_opts_SSE2.h"
#include "SkMorphology_opts.h"
#include "SkMorphology_opts_SSE2.h"
@@ -82,6 +83,8 @@ static int get_SIMD_level() {
    getcpuid(1, cpu_info);
    if ((cpu_info[2] & (1<<20)) != 0) {
        return SK_CPU_SSE_LEVEL_SSE42;
    } else if ((cpu_info[2] & (1<<19)) != 0) {
        return SK_CPU_SSE_LEVEL_SSE41;
    } else if ((cpu_info[2] & (1<<9)) != 0) {
        return SK_CPU_SSE_LEVEL_SSSE3;
    } else if ((cpu_info[3] & (1<<26)) != 0) {
@@ -206,16 +209,30 @@ SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) {
        }
    }

-   static SkBlitRow::Proc32 platform_32_procs[] = {
+   static SkBlitRow::Proc32 platform_32_procs_SSE2[] = {
        NULL,                           // S32_Opaque,
        S32_Blend_BlitRow32_SSE2,       // S32_Blend,
        S32A_Opaque_BlitRow32_SSE2,     // S32A_Opaque
        S32A_Blend_BlitRow32_SSE2,      // S32A_Blend,
    };

    #if defined(SK_ATT_ASM_SUPPORTED)
    static SkBlitRow::Proc32 platform_32_procs_SSE4[] = {
        NULL,                           // S32_Opaque,
        S32_Blend_BlitRow32_SSE2,       // S32_Blend,
        S32A_Opaque_BlitRow32_SSE4_asm, // S32A_Opaque
        S32A_Blend_BlitRow32_SSE2,      // S32A_Blend,
    };
    #endif

    SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
    #if defined(SK_ATT_ASM_SUPPORTED)
        if (supports_simd(SK_CPU_SSE_LEVEL_SSE41)) {
            return platform_32_procs_SSE4[flags];
        } else
    #endif
        if (supports_simd(SK_CPU_SSE_LEVEL_SSE2)) {
-           return platform_32_procs[flags];
+           return platform_32_procs_SSE2[flags];
        } else {
            return NULL;
        }
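The hunk above wires the new blitter into Skia's runtime dispatch: if CPUID reports SSE4.1 (and the assembly could be built at all, per SK_ATT_ASM_SUPPORTED), the S32A_Opaque table slot points at the new routine; otherwise the SSE2 table stays in effect. A sketch of the same selection pattern using the GCC/Clang builtin (assumption: simplified signature for illustration; the real table type is SkBlitRow::Proc32):

#include <cstdint>

using Proc32 = void (*)(uint32_t* dst, const uint32_t* src, int count, unsigned alpha);

// Pick the widest available implementation once; callers cache the pointer,
// so the CPU probe does not sit on the per-row hot path.
Proc32 choose_s32a_opaque(Proc32 sse4_asm, Proc32 sse2_fallback) {
    if (__builtin_cpu_supports("sse4.1")) {
        return sse4_asm;        // e.g. S32A_Opaque_BlitRow32_SSE4_asm
    }
    return sse2_fallback;       // e.g. S32A_Opaque_BlitRow32_SSE2
}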