powerpc: Rearrange cfi_offset calls
This patch rearranges cfi_offset() calls after the last store so as to avoid extra DW_CFA_advance opcodes in unwind information.
This commit is contained in:
parent
f60ee13f5d
commit
869d7180dd
10
ChangeLog
10
ChangeLog
@@ -1,3 +1,13 @@
|
||||
2016-03-11 Rajalakshmi Srinivasaraghavan <raji@linux.vnet.ibm.com>
|
||||
|
||||
* sysdeps/powerpc/powerpc32/power4/memcmp.S (memcmp): Rearrange
|
||||
cfi_offset calls.
|
||||
* sysdeps/powerpc/powerpc32/power6/memcpy.S (memcpy): Likewise.
|
||||
* sysdeps/powerpc/powerpc32/power7/memcmp.S (memcmp): Likewise.
|
||||
* sysdeps/powerpc/powerpc64/power4/memcmp.S (memcmp): Likewise.
|
||||
* sysdeps/powerpc/powerpc64/power7/memcmp.S (memcmp): Likewise.
|
||||
* sysdeps/powerpc/powerpc64/power7/strstr.S (strstr): Likewise.
|
||||
|
||||
2016-03-10 Carlos O'Donell <carlos@redhat.com>
|
||||
|
||||
* timezone/checktab.awk: Removed.
|
||||
|
@@ -54,8 +54,8 @@ EALIGN (memcmp, 4, 0)
|
||||
stwu 1, -64(r1)
|
||||
cfi_adjust_cfa_offset(64)
|
||||
stw rWORD8, 48(r1)
|
||||
cfi_offset(rWORD8, (48-64))
|
||||
stw rWORD7, 44(r1)
|
||||
cfi_offset(rWORD8, (48-64))
|
||||
cfi_offset(rWORD7, (44-64))
|
||||
bne L(unaligned)
|
||||
/* At this point we know both strings have the same alignment and the
|
||||
@@ -747,18 +747,18 @@ L(unaligned):
|
||||
the actual start of rSTR2. */
|
||||
clrrwi rSTR2, rSTR2, 2
|
||||
stw rWORD2_SHIFT, 28(r1)
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
/* Compute the left/right shift counts for the unaligned rSTR2,
|
||||
compensating for the logical (W aligned) start of rSTR1. */
|
||||
clrlwi rSHL, rWORD8_SHIFT, 30
|
||||
clrrwi rSTR1, rSTR1, 2
|
||||
stw rWORD4_SHIFT, 24(r1)
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
slwi rSHL, rSHL, 3
|
||||
cmplw cr5, rWORD8_SHIFT, rSTR2
|
||||
add rN, rN, r12
|
||||
slwi rWORD6, r12, 3
|
||||
stw rWORD6_SHIFT, 20(r1)
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
cfi_offset(rWORD6_SHIFT, (20-64))
|
||||
subfic rSHR, rSHL, 32
|
||||
srwi r0, rN, 4 /* Divide by 16 */
|
||||
@@ -852,15 +852,15 @@ L(duPs4):
|
||||
.align 4
|
||||
L(Wunaligned):
|
||||
stw rWORD8_SHIFT, 32(r1)
|
||||
cfi_offset(rWORD8_SHIFT, (32-64))
|
||||
clrrwi rSTR2, rSTR2, 2
|
||||
stw rWORD2_SHIFT, 28(r1)
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
srwi r0, rN, 4 /* Divide by 16 */
|
||||
stw rWORD4_SHIFT, 24(r1)
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
andi. r12, rN, 12 /* Get the W remainder */
|
||||
stw rWORD6_SHIFT, 20(r1)
|
||||
cfi_offset(rWORD8_SHIFT, (32-64))
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
cfi_offset(rWORD6_SHIFT, (20-64))
|
||||
slwi rSHL, rSHL, 3
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
|
@@ -46,8 +46,8 @@ EALIGN (memcpy, 5, 0)
|
||||
ble- cr1,L(word_unaligned_short) /* If move < 32 bytes. */
|
||||
cmplw cr6,10,11
|
||||
stw 31,24(1)
|
||||
cfi_offset(31,(24-32))
|
||||
stw 30,20(1)
|
||||
cfi_offset(31,(24-32))
|
||||
cfi_offset(30,(20-32))
|
||||
mr 30,3
|
||||
beq .L0
|
||||
|
@@ -54,8 +54,8 @@ EALIGN (memcmp, 4, 0)
|
||||
stwu 1, -64(r1)
|
||||
cfi_adjust_cfa_offset(64)
|
||||
stw rWORD8, 48(r1)
|
||||
cfi_offset(rWORD8, (48-64))
|
||||
stw rWORD7, 44(r1)
|
||||
cfi_offset(rWORD8, (48-64))
|
||||
cfi_offset(rWORD7, (44-64))
|
||||
bne L(unaligned)
|
||||
/* At this point we know both strings have the same alignment and the
|
||||
@@ -747,18 +747,18 @@ L(unaligned):
|
||||
the actual start of rSTR2. */
|
||||
clrrwi rSTR2, rSTR2, 2
|
||||
stw rWORD2_SHIFT, 28(r1)
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
/* Compute the left/right shift counts for the unaligned rSTR2,
|
||||
compensating for the logical (W aligned) start of rSTR1. */
|
||||
clrlwi rSHL, rWORD8_SHIFT, 30
|
||||
clrrwi rSTR1, rSTR1, 2
|
||||
stw rWORD4_SHIFT, 24(r1)
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
slwi rSHL, rSHL, 3
|
||||
cmplw cr5, rWORD8_SHIFT, rSTR2
|
||||
add rN, rN, r12
|
||||
slwi rWORD6, r12, 3
|
||||
stw rWORD6_SHIFT, 20(r1)
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
cfi_offset(rWORD6_SHIFT, (20-64))
|
||||
subfic rSHR, rSHL, 32
|
||||
srwi r0, rN, 4 /* Divide by 16 */
|
||||
@@ -852,15 +852,15 @@ L(duPs4):
|
||||
.align 4
|
||||
L(Wunaligned):
|
||||
stw rWORD8_SHIFT, 32(r1)
|
||||
cfi_offset(rWORD8_SHIFT, (32-64))
|
||||
clrrwi rSTR2, rSTR2, 2
|
||||
stw rWORD2_SHIFT, 28(r1)
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
srwi r0, rN, 4 /* Divide by 16 */
|
||||
stw rWORD4_SHIFT, 24(r1)
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
andi. r12, rN, 12 /* Get the W remainder */
|
||||
stw rWORD6_SHIFT, 20(r1)
|
||||
cfi_offset(rWORD8_SHIFT, (32-64))
|
||||
cfi_offset(rWORD2_SHIFT, (28-64))
|
||||
cfi_offset(rWORD4_SHIFT, (24-64))
|
||||
cfi_offset(rWORD6_SHIFT, (20-64))
|
||||
slwi rSHL, rSHL, 3
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
|
@@ -52,8 +52,8 @@ EALIGN (memcmp, 4, 0)
|
||||
byte loop. */
|
||||
blt cr1, L(bytealigned)
|
||||
std rWORD8, -8(r1)
|
||||
cfi_offset(rWORD8, -8)
|
||||
std rWORD7, -16(r1)
|
||||
cfi_offset(rWORD8, -8)
|
||||
cfi_offset(rWORD7, -16)
|
||||
bne L(unaligned)
|
||||
/* At this point we know both strings have the same alignment and the
|
||||
@@ -728,18 +728,18 @@ L(unaligned):
|
||||
the actual start of rSTR2. */
|
||||
clrrdi rSTR2, rSTR2, 3
|
||||
std rWORD2_SHIFT, -48(r1)
|
||||
cfi_offset(rWORD2_SHIFT, -48)
|
||||
/* Compute the left/right shift counts for the unaligned rSTR2,
|
||||
compensating for the logical (DW aligned) start of rSTR1. */
|
||||
clrldi rSHL, rWORD8_SHIFT, 61
|
||||
clrrdi rSTR1, rSTR1, 3
|
||||
std rWORD4_SHIFT, -56(r1)
|
||||
cfi_offset(rWORD4_SHIFT, -56)
|
||||
sldi rSHL, rSHL, 3
|
||||
cmpld cr5, rWORD8_SHIFT, rSTR2
|
||||
add rN, rN, r12
|
||||
sldi rWORD6, r12, 3
|
||||
std rWORD6_SHIFT, -64(r1)
|
||||
cfi_offset(rWORD2_SHIFT, -48)
|
||||
cfi_offset(rWORD4_SHIFT, -56)
|
||||
cfi_offset(rWORD6_SHIFT, -64)
|
||||
subfic rSHR, rSHL, 64
|
||||
srdi r0, rN, 5 /* Divide by 32 */
|
||||
@@ -833,15 +833,15 @@ L(duPs4):
|
||||
.align 4
|
||||
L(DWunaligned):
|
||||
std rWORD8_SHIFT, -40(r1)
|
||||
cfi_offset(rWORD8_SHIFT, -40)
|
||||
clrrdi rSTR2, rSTR2, 3
|
||||
std rWORD2_SHIFT, -48(r1)
|
||||
cfi_offset(rWORD2_SHIFT, -48)
|
||||
srdi r0, rN, 5 /* Divide by 32 */
|
||||
std rWORD4_SHIFT, -56(r1)
|
||||
cfi_offset(rWORD4_SHIFT, -56)
|
||||
andi. r12, rN, 24 /* Get the DW remainder */
|
||||
std rWORD6_SHIFT, -64(r1)
|
||||
cfi_offset(rWORD8_SHIFT, -40)
|
||||
cfi_offset(rWORD2_SHIFT, -48)
|
||||
cfi_offset(rWORD4_SHIFT, -56)
|
||||
cfi_offset(rWORD6_SHIFT, -64)
|
||||
sldi rSHL, rSHL, 3
|
||||
#ifdef __LITTLE_ENDIAN__
|
||||
|
@@ -82,17 +82,17 @@ EALIGN (memcmp, 4, 0)
|
||||
byte loop. */
|
||||
blt cr1, L(bytealigned)
|
||||
std rWORD8, rWORD8SAVE(r1)
|
||||
cfi_offset(rWORD8, rWORD8SAVE)
|
||||
std rWORD7, rWORD7SAVE(r1)
|
||||
cfi_offset(rWORD7, rWORD7SAVE)
|
||||
std rOFF8, rOFF8SAVE(r1)
|
||||
cfi_offset(rWORD7, rOFF8SAVE)
|
||||
std rOFF16, rOFF16SAVE(r1)
|
||||
cfi_offset(rWORD7, rOFF16SAVE)
|
||||
std rOFF24, rOFF24SAVE(r1)
|
||||
cfi_offset(rWORD7, rOFF24SAVE)
|
||||
std rOFF32, rOFF32SAVE(r1)
|
||||
cfi_offset(rWORD7, rOFF32SAVE)
|
||||
cfi_offset(rWORD8, rWORD8SAVE)
|
||||
cfi_offset(rWORD7, rWORD7SAVE)
|
||||
cfi_offset(rOFF8, rOFF8SAVE)
|
||||
cfi_offset(rOFF16, rOFF16SAVE)
|
||||
cfi_offset(rOFF24, rOFF24SAVE)
|
||||
cfi_offset(rOFF32, rOFF32SAVE)
|
||||
|
||||
li rOFF8,8
|
||||
li rOFF16,16
|
||||
@@ -601,18 +601,18 @@ L(unaligned):
|
||||
the actual start of rSTR2. */
|
||||
clrrdi rSTR2, rSTR2, 3
|
||||
std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
|
||||
/* Compute the left/right shift counts for the unaligned rSTR2,
|
||||
compensating for the logical (DW aligned) start of rSTR1. */
|
||||
clrldi rSHL, rWORD8_SHIFT, 61
|
||||
clrrdi rSTR1, rSTR1, 3
|
||||
std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
|
||||
sldi rSHL, rSHL, 3
|
||||
cmpld cr5, rWORD8_SHIFT, rSTR2
|
||||
add rN, rN, r12
|
||||
sldi rWORD6, r12, 3
|
||||
std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
|
||||
cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
|
||||
cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
|
||||
subfic rSHR, rSHL, 64
|
||||
srdi r0, rN, 5 /* Divide by 32 */
|
||||
@@ -689,15 +689,15 @@ L(duPs4):
|
||||
.align 4
|
||||
L(DWunaligned):
|
||||
std rWORD8_SHIFT, rWORD8SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
|
||||
clrrdi rSTR2, rSTR2, 3
|
||||
std rWORD2_SHIFT, rWORD2SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
|
||||
srdi r0, rN, 5 /* Divide by 32 */
|
||||
std rWORD4_SHIFT, rWORD4SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
|
||||
andi. r12, rN, 24 /* Get the DW remainder */
|
||||
std rWORD6_SHIFT, rWORD6SHIFTSAVE(r1)
|
||||
cfi_offset(rWORD8_SHIFT, rWORD8SHIFTSAVE)
|
||||
cfi_offset(rWORD2_SHIFT, rWORD2SHIFTSAVE)
|
||||
cfi_offset(rWORD4_SHIFT, rWORD4SHIFTSAVE)
|
||||
cfi_offset(rWORD6_SHIFT, rWORD6SHIFTSAVE)
|
||||
sldi rSHL, rSHL, 3
|
||||
LD rWORD6, 0, rSTR2
|
||||
|
@@ -59,14 +59,14 @@ EALIGN (strstr, 4, 0)
|
||||
CALL_MCOUNT 2
|
||||
mflr r0 /* Load link register LR to r0. */
|
||||
std r31, -8(r1) /* Save callers register r31. */
|
||||
cfi_offset(r31, -8)
|
||||
std r30, -16(r1) /* Save callers register r30. */
|
||||
cfi_offset(r30, -16)
|
||||
std r29, -24(r1) /* Save callers register r29. */
|
||||
cfi_offset(r29, -24)
|
||||
std r28, -32(r1) /* Save callers register r28. */
|
||||
cfi_offset(r28, -32)
|
||||
std r0, 16(r1) /* Store the link register. */
|
||||
cfi_offset(r31, -8)
|
||||
cfi_offset(r30, -16)
|
||||
cfi_offset(r28, -32)
|
||||
cfi_offset(r29, -24)
|
||||
cfi_offset(lr, 16)
|
||||
stdu r1, -FRAMESIZE(r1) /* Create the stack frame. */
|
||||
cfi_adjust_cfa_offset(FRAMESIZE)
|
||||
|
Loading…
x
Reference in New Issue
Block a user