[PATCH net-next v7 2/6] net: Add control functions for irq suspension

From: Joe Damato
Date: Thu Nov 07 2024 - 21:40:18 EST


From: Martin Karsten <mkarsten@xxxxxxxxxxxx>

The napi_suspend_irqs routine bootstraps irq suspension by extending
the defer timeout to irq_suspend_timeout.

The napi_resume_irqs routine effectively cancels irq suspension by
forcing the napi to be scheduled immediately.
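
For illustration, below is a minimal sketch of how a polling subsystem
(for example, an epoll-style busy-poll loop) might drive these helpers
around its event loop. The ep_poll_ctx structure and the ep_* wrapper
names are assumptions invented for this sketch, not part of this patch:

#include <net/busy_poll.h>

/* Illustrative context only: one napi id recorded from a previously
 * polled socket, as an epoll-like subsystem might track it.
 */
struct ep_poll_ctx {
        unsigned int napi_id;
};

/* Call when the application starts (or keeps) processing events:
 * extends the defer timeout to irq_suspend_timeout so device irqs
 * stay masked while there is work to do.
 */
static void ep_suspend_napi_irqs(struct ep_poll_ctx *ctx)
{
        if (ctx->napi_id)
                napi_suspend_irqs(ctx->napi_id);
}

/* Call right before the task blocks: cancels suspension by scheduling
 * the napi immediately, returning to normal irq-driven processing.
 */
static void ep_resume_napi_irqs(struct ep_poll_ctx *ctx)
{
        if (ctx->napi_id)
                napi_resume_irqs(ctx->napi_id);
}

If the application stops polling without calling the resume helper,
irq_suspend_timeout still acts as a safety net: napi_watchdog fires
once it expires and resumes irq processing.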

Signed-off-by: Martin Karsten <mkarsten@xxxxxxxxxxxx>
Co-developed-by: Joe Damato <jdamato@xxxxxxxxxx>
Signed-off-by: Joe Damato <jdamato@xxxxxxxxxx>
Tested-by: Joe Damato <jdamato@xxxxxxxxxx>
Tested-by: Martin Karsten <mkarsten@xxxxxxxxxxxx>
Acked-by: Stanislav Fomichev <sdf@xxxxxxxxxxx>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@xxxxxxxxx>
---
v7:
- This is now patch 2 instead of patch 3; patch 2 from v6 was dropped.

v1 -> v2:
- Added a comment to napi_resume_irqs.

 include/net/busy_poll.h |  3 +++
 net/core/dev.c          | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f03040baaefd..c858270141bc 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -52,6 +52,9 @@ void napi_busy_loop_rcu(unsigned int napi_id,
 			bool (*loop_end)(void *, unsigned long),
 			void *loop_end_arg, bool prefer_busy_poll, u16 budget);
 
+void napi_suspend_irqs(unsigned int napi_id);
+void napi_resume_irqs(unsigned int napi_id);
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
 {
diff --git a/net/core/dev.c b/net/core/dev.c
index 4d910872963f..81f5e4175a6a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6507,6 +6507,45 @@ void napi_busy_loop(unsigned int napi_id,
 }
 EXPORT_SYMBOL(napi_busy_loop);
 
+void napi_suspend_irqs(unsigned int napi_id)
+{
+	struct napi_struct *napi;
+
+	rcu_read_lock();
+	napi = napi_by_id(napi_id);
+	if (napi) {
+		unsigned long timeout = napi_get_irq_suspend_timeout(napi);
+
+		if (timeout)
+			hrtimer_start(&napi->timer, ns_to_ktime(timeout),
+				      HRTIMER_MODE_REL_PINNED);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(napi_suspend_irqs);
+
+void napi_resume_irqs(unsigned int napi_id)
+{
+	struct napi_struct *napi;
+
+	rcu_read_lock();
+	napi = napi_by_id(napi_id);
+	if (napi) {
+		/* If irq_suspend_timeout is set to 0 between the call to
+		 * napi_suspend_irqs and now, the original value still
+		 * determines the safety timeout as intended and napi_watchdog
+		 * will resume irq processing.
+		 */
+		if (napi_get_irq_suspend_timeout(napi)) {
+			local_bh_disable();
+			napi_schedule(napi);
+			local_bh_enable();
+		}
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(napi_resume_irqs);
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 static void __napi_hash_add_with_id(struct napi_struct *napi,
--
2.25.1