[PATCH] powerpc/32: don't restore r0, r6-r8 on exception entry path after trace_hardirqs_off()

From: Christophe Leroy
Date: Tue Jan 07 2020 - 04:16:45 EST


Since commit b86fb88855ea ("powerpc/32: implement fast entry for
syscalls on non BOOKE") and commit 1a4b739bbb4f ("powerpc/32:
implement fast entry for syscalls on BOOKE"), syscalls no longer
use the exception entry path. It is therefore pointless to
restore r0 and r6-r8 after calling trace_hardirqs_off().
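
For reference, the PPC32 kernel syscall convention passes the
syscall number in r0 and the arguments in r3-r8, and r0 and r3-r12
are volatile across a C function call such as trace_hardirqs_off().
So r0 and r6-r8 only ever mattered to the syscall path, which no
longer comes through here; the exception path only needs r3-r5
(possible page fault arguments), r9 and r11. A rough sketch of the
resulting pattern (the save side is paraphrased from context, not
quoted verbatim from entry_32.S):

	stwu	r1,-32(r1)		/* small temporary stack frame */
	stw	r9,8(r1)		/* old MSR */
	stw	r11,12(r1)		/* handler address */
	stw	r3,16(r1)		/* possible page fault arguments */
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off	/* C call: may clobber r0, r3-r12 */
	lwz	r5,24(r1)		/* reload only what the handler needs */
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32		/* pop the frame */
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */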

While at it, drop the '2:' label, which is now unused and misleading.

Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
---
arch/powerpc/kernel/entry_32.S | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4a7cd22a8aaf..748a13788b9b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -242,9 +242,8 @@ reenable_mmu:
* r3 can be different from GPR3(r1) at this point, r9 and r11
* contains the old MSR and handler address respectively,
* r4 & r5 can contain page fault arguments that need to be passed
- * along as well. r12, CCR, CTR, XER etc... are left clobbered as
- * they aren't useful past this point (aren't syscall arguments),
- * the rest is restored from the exception frame.
+ * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+ * clobbered as they aren't useful past this point.
*/

stwu r1,-32(r1)
@@ -258,16 +257,12 @@ reenable_mmu:
* lockdep
*/
1: bl trace_hardirqs_off
-2: lwz r5,24(r1)
+ lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
lwz r11,12(r1)
lwz r9,8(r1)
addi r1,r1,32
- lwz r0,GPR0(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
mtctr r11
mtlr r9
bctr /* jump to handler */
--
2.13.3