arm: CVE-2020-6096: Fix multiarch memcpy for negative length [BZ #25620]

Unsigned branch instructions are now used for the count in r2, fixing
the wrong behavior when a negative length is passed to memcpy.
This commit fixes the armv7 version.
commit beea361050
parent 79a4fa341b
Author:    Alexander Anisimov
Date:      2020-07-08 14:18:31 +02:00
Committer: Florian Weimer
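
The root cause: memcpy takes a size_t, so a negative length from the
caller arrives as a huge unsigned value, and signed branches such as
bge then route it onto code paths sized for small counts. A minimal C
sketch of the mismatch (illustrative only; assumes 32-bit int and
size_t, as on arm):

    #include <stdio.h>
    #include <stddef.h>

    int main (void)
    {
      int len = -4;             /* caller's "negative length" */
      size_t n = (size_t) len;  /* what memcpy receives: 0xFFFFFFFC */

      printf ("signed   len >= 64: %d\n", len >= 64);  /* 0 -> old bge: short-copy path */
      printf ("unsigned n   >= 64: %d\n", n >= 64);    /* 1 -> new bhs: bulk-copy path */
      return 0;
    }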

--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S

@@ -268,7 +268,7 @@ ENTRY(memcpy)
 	mov	dst, dstin	/* Preserve dstin, we need to return it.  */
 	cmp	count, #64
-	bge	.Lcpy_not_short
+	bhs	.Lcpy_not_short
 	/* Deal with small copies quickly by dropping straight into the
 	   exit block.  */
@@ -351,10 +351,10 @@ ENTRY(memcpy)
 1:
 	subs	tmp2, count, #64	/* Use tmp2 for count.  */
-	blt	.Ltail63aligned
+	blo	.Ltail63aligned
 	cmp	tmp2, #512
-	bge	.Lcpy_body_long
+	bhs	.Lcpy_body_long
 .Lcpy_body_medium:		/* Count in tmp2.  */
 #ifdef USE_VFP
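
The blt/blo change applies the same reasoning one step later: after
"subs tmp2, count, #64", blt tests the signed condition (N != V),
while blo tests for a borrow (C clear), i.e. the unsigned count < 64.
A small C model of the two predicates, under the same 32-bit
assumptions:

    #include <stdint.h>
    #include <stdio.h>

    /* Models of "subs tmp2, count, #64" + blt (signed) or blo (unsigned).  */
    static int blt_taken (uint32_t count) { return (int32_t) count < 64; }
    static int blo_taken (uint32_t count) { return count < 64u; }

    int main (void)
    {
      uint32_t huge = 0xFFFFFFFFu;  /* (uint32_t) -1 */

      printf ("blt taken (old): %d\n", blt_taken (huge));  /* 1: jumps to .Ltail63aligned */
      printf ("blo taken (new): %d\n", blo_taken (huge));  /* 0: stays on the bulk path */
      return 0;
    }
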
@@ -378,7 +378,7 @@ ENTRY(memcpy)
 	add	src, src, #64
 	vstr	d1, [dst, #56]
 	add	dst, dst, #64
-	bge	1b
+	bhs	1b
 	tst	tmp2, #0x3f
 	beq	.Ldone
@@ -412,7 +412,7 @@ ENTRY(memcpy)
 	ldrd	A_l, A_h, [src, #64]!
 	strd	A_l, A_h, [dst, #64]!
 	subs	tmp2, tmp2, #64
-	bge	1b
+	bhs	1b
 	tst	tmp2, #0x3f
 	bne	1f
 	ldr	tmp2, [sp], #FRAME_SIZE
@@ -482,7 +482,7 @@ ENTRY(memcpy)
 	add	src, src, #32
 	subs	tmp2, tmp2, #prefetch_lines * 64 * 2
-	blt	2f
+	blo	2f
 1:
 	cpy_line_vfp	d3, 0
 	cpy_line_vfp	d4, 64
@@ -494,7 +494,7 @@ ENTRY(memcpy)
 	add	dst, dst, #2 * 64
 	add	src, src, #2 * 64
 	subs	tmp2, tmp2, #prefetch_lines * 64
-	bge	1b
+	bhs	1b
 2:
 	cpy_tail_vfp	d3, 0
@@ -615,8 +615,8 @@ ENTRY(memcpy)
 1:
 	pld	[src, #(3 * 64)]
 	subs	count, count, #64
-	ldrmi	tmp2, [sp], #FRAME_SIZE
-	bmi	.Ltail63unaligned
+	ldrlo	tmp2, [sp], #FRAME_SIZE
+	blo	.Ltail63unaligned
 	pld	[src, #(4 * 64)]
 #ifdef USE_NEON
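
In the unaligned loop the same bug wears a different condition code:
after "subs count, count, #64", mi tests the sign bit of the result,
while lo tests the borrow, i.e. whether the unsigned count was below
64. For huge counts the two disagree, which is exactly the CVE. A C
sketch of the divergence, same 32-bit assumptions:

    #include <stdint.h>
    #include <stdio.h>

    int main (void)
    {
      uint32_t count = 0xFFFFFFF0u;    /* e.g. (uint32_t) -16 */
      uint32_t res = count - 64;

      int n_flag = (int32_t) res < 0;  /* N: sign bit of the result */
      int borrow = count < 64u;        /* !C: borrow on the subtraction */

      printf ("bmi taken (old): %d\n", n_flag);  /* 1: bails out to .Ltail63unaligned */
      printf ("blo taken (new): %d\n", borrow);  /* 0: the bulk loop keeps running */
      return 0;
    }
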
@@ -633,7 +633,7 @@ ENTRY(memcpy)
 	neon_load_multi	d0-d3, src
 	neon_load_multi	d4-d7, src
 	subs	count, count, #64
-	bmi	2f
+	blo	2f
 1:
 	pld	[src, #(4 * 64)]
 	neon_store_multi	d0-d3, dst
@@ -641,7 +641,7 @@ ENTRY(memcpy)
 	neon_store_multi	d4-d7, dst
 	neon_load_multi	d4-d7, src
 	subs	count, count, #64
-	bpl	1b
+	bhs	1b
 2:
 	neon_store_multi	d0-d3, dst
 	neon_store_multi	d4-d7, dst
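
The bpl/bhs swap is the remaining flavor: after a subs, pl means the
result's sign bit is clear, hs means no borrow occurred, and the two
diverge once the remaining count is large enough that count - 64 has
its top bit set. One last C sketch, same assumptions as above:

    #include <stdint.h>
    #include <stdio.h>

    int main (void)
    {
      uint32_t count = 0xFFFFFFFFu;  /* huge count mid-loop */
      uint32_t res = count - 64;     /* 0xFFFFFFBF: top bit set, no borrow */

      int pl = (int32_t) res >= 0;   /* bpl (old): N clear */
      int hs = count >= 64u;         /* bhs (new): C set, no borrow */

      printf ("bpl taken (old): %d\n", pl);  /* 0: loop exits far too early */
      printf ("bhs taken (new): %d\n", hs);  /* 1: loop continues, honoring size_t */
      return 0;
    }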