net: Add control functions for irq suspension
Author: Martin Karsten <mkarsten@uwaterloo.ca>
Sat, 9 Nov 2024 05:02:32 +0000 (05:02 +0000)
Committer: Jakub Kicinski <kuba@kernel.org>
Tue, 12 Nov 2024 02:45:06 +0000 (18:45 -0800)
The napi_suspend_irqs routine bootstraps irq suspension by elongating
the defer timeout to irq_suspend_timeout.

The napi_resume_irqs routine effectively cancels irq suspension by
forcing the napi to be scheduled immediately.

Signed-off-by: Martin Karsten <mkarsten@uwaterloo.ca>
Co-developed-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Joe Damato <jdamato@fastly.com>
Tested-by: Martin Karsten <mkarsten@uwaterloo.ca>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Link: https://patch.msgid.link/20241109050245.191288-3-jdamato@fastly.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/busy_poll.h
net/core/dev.c

index f03040baaefd80b55023c81eb8e062910d6776c0..c858270141bc217595c92e994a92e47bec06054e 100644 (file)
@@ -52,6 +52,9 @@ void napi_busy_loop_rcu(unsigned int napi_id,
                        bool (*loop_end)(void *, unsigned long),
                        void *loop_end_arg, bool prefer_busy_poll, u16 budget);
 
+void napi_suspend_irqs(unsigned int napi_id);
+void napi_resume_irqs(unsigned int napi_id);
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
 {
index 4d910872963fa1a9c5cfead02d7105545b1f9eb6..13d00fc10f55998077cb643a2f6e3c171974589d 100644 (file)
@@ -6507,6 +6507,43 @@ void napi_busy_loop(unsigned int napi_id,
 }
 EXPORT_SYMBOL(napi_busy_loop);
 
+/* napi_suspend_irqs - bootstrap irq suspension for a NAPI instance.
+ * @napi_id: id of the napi_struct whose irqs should stay suspended.
+ *
+ * Looks up the napi_struct under RCU and, when its configured
+ * irq_suspend_timeout is non-zero, (re)starts the napi deferral hrtimer
+ * with that elongated timeout. An unknown @napi_id or a timeout of 0 is
+ * silently a no-op, so callers need not check whether suspension is
+ * enabled before calling.
+ */
+void napi_suspend_irqs(unsigned int napi_id)
+{
+       struct napi_struct *napi;
+
+       rcu_read_lock();
+       napi = napi_by_id(napi_id);
+       if (napi) {
+               unsigned long timeout = napi_get_irq_suspend_timeout(napi);
+
+               if (timeout)
+                       hrtimer_start(&napi->timer, ns_to_ktime(timeout),
+                                     HRTIMER_MODE_REL_PINNED);
+       }
+       rcu_read_unlock();
+}
+
+/* napi_resume_irqs - cancel irq suspension for a NAPI instance.
+ * @napi_id: id of the napi_struct whose irq processing should resume.
+ *
+ * Looks up the napi_struct under RCU and, if irq suspension is still
+ * configured, schedules the napi immediately so softirq polling resumes
+ * without waiting for the elongated timer started by napi_suspend_irqs.
+ * napi_schedule() must run with bottom halves disabled, hence the
+ * local_bh_disable()/local_bh_enable() pair.
+ */
+void napi_resume_irqs(unsigned int napi_id)
+{
+       struct napi_struct *napi;
+
+       rcu_read_lock();
+       napi = napi_by_id(napi_id);
+       if (napi) {
+               /* If irq_suspend_timeout is set to 0 between the call to
+                * napi_suspend_irqs and now, the original value still
+                * determines the safety timeout as intended and napi_watchdog
+                * will resume irq processing.
+                */
+               if (napi_get_irq_suspend_timeout(napi)) {
+                       local_bh_disable();
+                       napi_schedule(napi);
+                       local_bh_enable();
+               }
+       }
+       rcu_read_unlock();
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 static void __napi_hash_add_with_id(struct napi_struct *napi,