[tip: sched/urgent] sched/rt, ARM: Use CONFIG_PREEMPTION
From: tip-bot2 for Thomas Gleixner
Date: Sun Dec 08 2019 - 09:59:09 EST
The following commit has been merged into the sched/urgent branch of tip:
Commit-ID: e7289c6de81c8e8991148e46c9ab43e2d23940f3
Gitweb: https://git.kernel.org/tip/e7289c6de81c8e8991148e46c9ab43e2d23940f3
Author: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate: Tue, 15 Oct 2019 21:17:48 +02:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Sun, 08 Dec 2019 14:37:32 +01:00
sched/rt, ARM: Use CONFIG_PREEMPTION
CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality, which today
depends on CONFIG_PREEMPT.
Switch the entry code and the cache maintenance code over to use
CONFIG_PREEMPTION, and add output in show_stack() for PREEMPT_RT.
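For context, a minimal C-level sketch of the consolidation, assuming the
Kconfig wiring described above (both CONFIG_PREEMPT and CONFIG_PREEMPT_RT
select CONFIG_PREEMPTION); the helper name below is illustrative and not
part of this patch:

    /* Guarding on CONFIG_PREEMPT alone would leave PREEMPT_RT builds on
     * the non-preemptible path; CONFIG_PREEMPTION covers both models. */
    #ifdef CONFIG_PREEMPTION
    static inline const char *preempt_model_tag(void)   /* illustrative */
    {
            return IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
    }
    #else
    static inline const char *preempt_model_tag(void)
    {
            return "";
    }
    #endif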
[bigeasy: +traps.c]
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Link: https://lore.kernel.org/r/20191015191821.11479-2-bigeasy@xxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/arm/include/asm/switch_to.h | 2 +-
arch/arm/kernel/entry-armv.S | 4 ++--
arch/arm/kernel/traps.c | 2 ++
arch/arm/mm/cache-v7.S | 4 ++--
arch/arm/mm/cache-v7m.S | 4 ++--
5 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index d3e937d..007d8fe 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,7 @@
* to ensure that the maintenance completes in case we migrate to another
* CPU.
*/
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi() dsb(ish)
#else
#define __complete_pending_tlbi()
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 858d4e5..77f5483 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -211,7 +211,7 @@ __irq_svc:
svc_entry
irq_handler
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
@@ -226,7 +226,7 @@ ENDPROC(__irq_svc)
.ltorg
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
svc_preempt:
mov r8, lr
1: bl preempt_schedule_irq @ irq en/disable is done inside
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c053abd..abb7dd7 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 0ee8fc4..dc8f152 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -135,13 +135,13 @@ flush_levels:
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sych the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
restore_irqs_notrace r9
#endif
and r2, r1, #7 @ extract the length of the cache lines
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
index a0035c4..1bc3a0a 100644
--- a/arch/arm/mm/cache-v7m.S
+++ b/arch/arm/mm/cache-v7m.S
@@ -183,13 +183,13 @@ flush_levels:
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
#endif
write_csselr r10, r1 @ set current cache level
isb @ isb to sych the new cssr&csidr
read_ccsidr r1 @ read the new csidr
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
restore_irqs_notrace r9
#endif
and r2, r1, #7 @ extract the length of the cache lines
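As a footnote, a hedged C-level sketch of the pattern the cache-v7/v7m hunks
guard: selecting a cache level and reading its geometry is a two-step,
non-atomic register sequence, so it is bracketed by an IRQ-off section
whenever the kernel can be preempted. write_csselr()/read_ccsidr() are
hypothetical C helpers standing in for the assembly macros; only
local_irq_save()/local_irq_restore() and isb() are real kernel APIs here:

    #include <linux/irqflags.h>
    #include <asm/barrier.h>

    static unsigned int read_cache_geometry(unsigned int level)
    {
            unsigned long flags = 0;
            unsigned int ccsidr;

    #ifdef CONFIG_PREEMPTION                /* covers PREEMPT and PREEMPT_RT */
            local_irq_save(flags);          /* keep csselr/ccsidr pair atomic */
    #endif
            write_csselr(level);            /* select cache level (hypothetical) */
            isb();                          /* make the selection visible */
            ccsidr = read_ccsidr();         /* read its geometry (hypothetical) */
    #ifdef CONFIG_PREEMPTION
            local_irq_restore(flags);
    #endif
            return ccsidr;
    }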