mirror of https://sourceware.org/git/glibc.git (synced 2024-11-27 07:20:11 +00:00)
commit a6b7502ec0
No bug. The optimizations are as follows:

1) Always align entry to 64 bytes. This makes behavior more
   predictable and makes other frontend optimizations easier.

2) Make the L(more_8x_vec) cases 4k aliasing aware. This can have
   significant benefits in the case that: 0 < (dst - src) < [256, 512].

3) Align before `rep movsb`. For ERMS this is roughly a [0, 30%]
   improvement and for FSRM a [-10%, 25%] improvement.

In addition to these primary changes there is general cleanup
throughout to optimize the aligning routines and control flow logic.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
Reviewed-by: H.J. Lu <hjl.tools@gmail.com>
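For intuition, here is a hedged C sketch of optimizations 2) and 3). This is not glibc's actual code: the function names, the plain byte loops, and the fixed 512-byte threshold (taken from the [256, 512] range quoted above) are illustrative stand-ins for the vectorized paths in the real implementation.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Plain byte loops standing in for the vectorized forward and
       backward copy loops of the real implementation.  */
    static void copy_forward (unsigned char *d, const unsigned char *s,
                              size_t n)
    {
      for (size_t i = 0; i < n; i++)
        d[i] = s[i];
    }

    static void copy_backward (unsigned char *d, const unsigned char *s,
                               size_t n)
    {
      while (n--)
        d[n] = s[n];
    }

    /* Optimization 2), sketched: for a large copy where both directions
       are safe (no overlap assumed here), prefer a backward copy when the
       low 12 bits of (dst - src) fall in (0, 512).  A forward copy would
       then keep storing just ahead of addresses it is about to load from,
       modulo 4 KiB, and each such load falsely aliases a recent store.  */
    void sketch_memmove_large (unsigned char *dst, const unsigned char *src,
                               size_t n)
    {
      uintptr_t diff = ((uintptr_t) dst - (uintptr_t) src) & (PAGE_SIZE - 1);
      if (diff > 0 && diff < 512)
        copy_backward (dst, src, n);
      else
        copy_forward (dst, src, n);
    }

    /* Optimization 3), sketched: copy an unaligned head so that dst is
       64-byte aligned before the bulk copy.  memcpy here stands in for
       `rep movsb`, whose ERMS/FSRM fast paths favor an aligned
       destination.  Assumes dst and src do not overlap.  */
    void sketch_copy_erms (unsigned char *dst, const unsigned char *src,
                           size_t n)
    {
      size_t head = (-(uintptr_t) dst) & 63; /* bytes to 64-byte alignment */
      if (head > n)
        head = n;
      memcpy (dst, src, head);                    /* unaligned head */
      memcpy (dst + head, src + head, n - head);  /* stand-in for rep movsb */
    }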
#if IS_IN (libc)
/* EVEX variant: 256-bit vectors, but using the EVEX-only registers
   ymm16-ymm31.  Accessing those registers does not trigger SSE-AVX
   transition penalties, so VZEROUPPER is not needed and is defined
   empty below.  */
# define VEC_SIZE	32
# define XMM0		xmm16
# define XMM1		xmm17
# define YMM0		ymm16
# define YMM1		ymm17
# define VEC0		ymm16
# define VEC1		ymm17
# define VEC2		ymm18
# define VEC3		ymm19
# define VEC4		ymm20
# define VEC5		ymm21
# define VEC6		ymm22
# define VEC7		ymm23
# define VEC8		ymm24
# define VEC9		ymm25
# define VEC10		ymm26
# define VEC11		ymm27
# define VEC12		ymm28
# define VEC13		ymm29
# define VEC14		ymm30
# define VEC15		ymm31
# define VEC(i)		VEC##i
/* Non-temporal, unaligned, and aligned vector moves (EVEX encodings).  */
# define VMOVNT		vmovntdq
# define VMOVU		vmovdqu64
# define VMOVA		vmovdqa64
# define VZEROUPPER
/* Encoded length in bytes of VMOVU; the shared template uses it when
   padding entry points to the 64-byte alignment described above.  */
# define MOV_SIZE	6
# define SECTION(p)		p##.evex
# define MEMMOVE_SYMBOL(p,s)	p##_evex_##s

# include "memmove-vec-unaligned-erms.S"
#endif
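Note that this file contains no memmove logic of its own: it only sets configuration macros and then includes the shared template memmove-vec-unaligned-erms.S, which expands into the _evex_-suffixed implementations. A minimal C analogue of this configure-then-include pattern; the file names vec_template.h / copy_evex_like.c and the SYMBOL macro are hypothetical, for illustration only:

    /* vec_template.h -- hypothetical template playing the role of
       memmove-vec-unaligned-erms.S.  The includer must define VEC_SIZE
       and SYMBOL(name) before including it.  */
    void SYMBOL (copy_one_vec) (unsigned char *dst, const unsigned char *src)
    {
      for (int i = 0; i < VEC_SIZE; i++)
        dst[i] = src[i];
    }

    /* copy_evex_like.c -- hypothetical instantiation playing the role of
       this file: define the configuration, then include the template.  */
    #define VEC_SIZE 32
    #define SYMBOL(name) name##_evex
    #include "vec_template.h"
    /* The template expands to:
       void copy_one_vec_evex (unsigned char *dst, const unsigned char *src)  */

Each ISA variant (sse2, avx, evex, avx512) instantiates the same template with different vector widths, move instructions, and symbol suffixes.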