[RFC PATCH v3 3/3] softirq: defer softirq processing to ksoftirqd if CPU is busy with RT
From: John Stultz
Date: Tue Sep 20 2022 - 21:27:40 EST
From: Pavankumar Kondeti <pkondeti@xxxxxxxxxxxxxx>
Defer softirq processing to ksoftirqd if an RT task is
running or queued on the current CPU. This complements the RT
task placement algorithm, which tries to find a CPU that is not
currently busy with softirqs.
Currently only the NET_TX, NET_RX, BLOCK and TASKLET softirqs
are deferred, as they can potentially run for a long time.
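For reference, the LONG_SOFTIRQ_MASK used below is defined in an
earlier patch of this series; going by the list above, it presumably
expands to something like the following sketch (hypothetical, not
part of this patch):

  #define LONG_SOFTIRQ_MASK (BIT(NET_TX_SOFTIRQ) | \
                             BIT(NET_RX_SOFTIRQ) | \
                             BIT(BLOCK_SOFTIRQ)  | \
                             BIT(TASKLET_SOFTIRQ))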
Additionally, this patch stubs out the ksoftirqd_running() logic
in the CONFIG_RT_SOFTIRQ_OPTIMIZATION case. Since deferring
potentially long-running softirqs keeps ksoftirqd busy, that
check would otherwise also prevent shorter-running softirqs from
being processed immediately. With the stub in place, the
potentially long-running softirqs are still deferred, but the
shorter-running ones can run right away.
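Roughly, the resulting __do_softirq() flow looks like the following
simplified sketch (see the diff below for the actual change):

  pending  = local_softirq_pending();
  /* Strip the long-running softirq bits when an RT task is
   * running or queued on this CPU; they stay pending for
   * ksoftirqd instead of being handled inline:
   */
  deferred = softirq_deferred_for_rt(&pending);
  set_softirq_pending(deferred);
  /* ... handle the remaining (short) softirqs inline ... */
  if (pending | deferred)
          wakeup_softirqd();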
This patch includes folded-in fixes by:
Lingutla Chandrasekhar <clingutla@xxxxxxxxxxxxxx>
Satya Durga Srinivasu Prabhala <satyap@xxxxxxxxxxxxxx>
J. Avila <elavila@xxxxxxxxxx>
Cc: John Dias <joaodias@xxxxxxxxxx>
Cc: Connor O'Brien <connoro@xxxxxxxxxx>
Cc: Rick Yiu <rickyiu@xxxxxxxxxx>
Cc: John Kacur <jkacur@xxxxxxxxxx>
Cc: Qais Yousef <qais.yousef@xxxxxxx>
Cc: Chris Redpath <chris.redpath@xxxxxxx>
Cc: Abhijeet Dharmapurikar <adharmap@xxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: kernel-team@xxxxxxxxxxx
Signed-off-by: Pavankumar Kondeti <pkondeti@xxxxxxxxxxxxxx>
[satyap@xxxxxxxxxxxxxx: trivial merge conflict resolution.]
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@xxxxxxxxxxxxxx>
[elavila: Port to mainline, squash with bugfix]
Signed-off-by: J. Avila <elavila@xxxxxxxxxx>
[jstultz: Rebased to linus/HEAD, minor rearranging of code,
included bugfix reported by Qais Yousef <qais.yousef@xxxxxxx>]
Signed-off-by: John Stultz <jstultz@xxxxxxxxxx>
---
include/linux/sched.h | 10 ++++++++++
kernel/sched/cpupri.c | 13 +++++++++++++
kernel/softirq.c | 25 +++++++++++++++++++++++--
3 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e7b2f8a5c711..7f76371cbbb0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1826,6 +1826,16 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
+
+#ifdef CONFIG_RT_SOFTIRQ_OPTIMIZATION
+extern bool cpupri_check_rt(void);
+#else
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index fa9ce9d83683..18dc75d16951 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -64,6 +64,19 @@ static int convert_prio(int prio)
 	return cpupri;
 }
 
+#ifdef CONFIG_RT_SOFTIRQ_OPTIMIZATION
+/*
+ * cpupri_check_rt - check if the current CPU has an RT task
+ * Must be called from an rcu-sched read-side critical section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
+#endif
+
 static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
 				struct cpumask *lowest_mask, int idx)
 {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 35ee79dd8786..203a70dc9459 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -87,6 +87,7 @@ static void wakeup_softirqd(void)
 		wake_up_process(tsk);
 }
 
+#ifndef CONFIG_RT_SOFTIRQ_OPTIMIZATION
 /*
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
  * right now. Let ksoftirqd handle this at its own rate, to get fairness,
@@ -101,6 +102,9 @@ static bool ksoftirqd_running(unsigned long pending)
 		return false;
 	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
 }
+#else
+#define ksoftirqd_running(pending) (false)
+#endif /* CONFIG_RT_SOFTIRQ_OPTIMIZATION */
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 DEFINE_PER_CPU(int, hardirqs_enabled);
@@ -532,6 +536,17 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
+static __u32 softirq_deferred_for_rt(__u32 *pending)
+{
+	__u32 deferred = 0;
+
+	if (cpupri_check_rt()) {
+		deferred = *pending & LONG_SOFTIRQ_MASK;
+		*pending &= ~LONG_SOFTIRQ_MASK;
+	}
+	return deferred;
+}
+
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -539,6 +554,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	struct softirq_action *h;
 	bool in_hardirq;
+	__u32 deferred;
 	__u32 pending;
 	int softirq_bit;
 
@@ -551,13 +567,15 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(&pending);
 
 	softirq_handle_begin();
+
 	in_hardirq = lockdep_softirq_start();
 	account_softirq_enter(current);
 
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
+	set_softirq_pending(deferred);
 	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -596,13 +614,16 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	local_irq_disable();
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(&pending);
+
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
 		    --max_restart)
 			goto restart;
+	}
 
+	if (pending | deferred)
 		wakeup_softirqd();
-	}
 
 	account_softirq_exit(current);
 	lockdep_softirq_end(in_hardirq);
--
2.37.3.968.ga6b4b080e4-goog