[RFC PATCH 24/30] softirq: Introduce local_bh_enter/exit()

From: Frederic Weisbecker
Date: Wed Oct 10 2018 - 19:13:52 EST


From: Frederic Weisbecker <fweisbec@xxxxxxxxx>

So far, softirq disablement and softirq callback processing have been
handled the same way: increment the softirq offset, trace softirqs off,
disable preemption, etc...

The only difference remains in the way the preempt count is incremented:
by 1 for softirq processing (can't nest, as softirq processing isn't
re-entrant) and by 2 for softirq disablement (which can nest).

Now their behaviour is going to drift entirely. Softirq processing will
need to be reentrant and accept stacking SOFTIRQ_OFFSET increments.
OTOH softirq disablement will be driven by the vector enabled mask and
toggled only once any vector gets disabled.

Maintaining both behaviours under the same handler is going to be messy,
so move the preempt-count-related code for softirq processing to its own
handlers.

Signed-off-by: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab@xxxxxxxxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
---
kernel/softirq.c | 74 ++++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 58 insertions(+), 16 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index ae9e29f..22cc0a7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -139,19 +139,6 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

-static void __local_bh_enable(unsigned int cnt)
-{
- lockdep_assert_irqs_disabled();
-
- if (preempt_count() == cnt)
- trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-
- if (softirq_count() == (cnt & SOFTIRQ_MASK))
- trace_softirqs_on(_RET_IP_);
-
- __preempt_count_sub(cnt);
-}
-
/*
* Special-case - softirqs can safely be enabled by __do_softirq(),
* without processing still-pending softirqs:
@@ -159,7 +146,16 @@ static void __local_bh_enable(unsigned int cnt)
void local_bh_enable_no_softirq(void)
{
WARN_ON_ONCE(in_irq());
- __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
+ lockdep_assert_irqs_disabled();
+
+ if (preempt_count() == SOFTIRQ_DISABLE_OFFSET)
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
+ if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
+ trace_softirqs_on(_RET_IP_);
+
+ __preempt_count_sub(SOFTIRQ_DISABLE_OFFSET);
+
}
EXPORT_SYMBOL(local_bh_enable_no_softirq);

@@ -207,6 +203,52 @@ void local_bh_enable_all(void)
local_bh_enable(SOFTIRQ_ALL_MASK);
}

+static void local_bh_enter(unsigned long ip)
+{
+ unsigned long flags;
+
+ WARN_ON_ONCE(in_irq());
+
+ raw_local_irq_save(flags);
+ /*
+ * The preempt tracer hooks into preempt_count_add and will break
+ * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
+ * is set and before current->softirq_enabled is cleared.
+ * We must manually increment preempt_count here and manually
+ * call the trace_preempt_off later.
+ */
+ __preempt_count_add(SOFTIRQ_OFFSET);
+ /*
+ * Were softirqs turned off above:
+ */
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_off(ip);
+ raw_local_irq_restore(flags);
+
+ if (preempt_count() == SOFTIRQ_OFFSET) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ current->preempt_disable_ip = get_lock_parent_ip();
+#endif
+ trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
+ }
+}
+
+static void local_bh_exit(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (preempt_count() == SOFTIRQ_OFFSET)
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
+ if (softirq_count() == SOFTIRQ_OFFSET)
+ trace_softirqs_on(_RET_IP_);
+
+ __preempt_count_sub(SOFTIRQ_OFFSET);
+}
+
+
+
+
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
* but break the loop if need_resched() is set or after 2 ms.
@@ -276,7 +318,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
pending = local_softirq_pending() & local_softirq_enabled();
account_irq_enter_time(current);

- __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+ local_bh_enter(_RET_IP_);
in_hardirq = lockdep_softirq_start();

restart:
@@ -325,7 +367,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)

lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
- __local_bh_enable(SOFTIRQ_OFFSET);
+ local_bh_exit();
WARN_ON_ONCE(in_interrupt());
current_restore_flags(old_flags, PF_MEMALLOC);
}
--
2.7.4