net: add helpers for setting a memory provider on an rx queue
author    David Wei <dw@davidwei.uk>
          Tue, 4 Feb 2025 21:56:21 +0000 (13:56 -0800)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 7 Feb 2025 00:27:31 +0000 (16:27 -0800)
Add helpers that properly set up or remove a memory provider on an rx
queue and then restart the queue.

Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
Link: https://patch.msgid.link/20250204215622.695511-11-dw@davidwei.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
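
A minimal usage sketch (not part of this patch): assuming mp_ops points at a
struct memory_provider_ops, as the header name suggests, a provider could bind
and unbind a queue roughly as below; my_mp_ops, my_mp_state, my_mp_bind() and
my_mp_unbind() are hypothetical placeholders.

#include <linux/netdevice.h>
#include <net/page_pool/memory_provider.h>

/* Hypothetical ops table and private state; a real provider fills in
 * its callbacks and state here. */
static const struct memory_provider_ops my_mp_ops;
static void *my_mp_state;

static int my_mp_bind(struct net_device *dev, unsigned int qid)
{
	struct pp_memory_provider_params p = {
		.mp_ops  = &my_mp_ops,
		.mp_priv = my_mp_state,
	};

	/* Takes rtnl_lock, rejects an out-of-range queue index and a queue
	 * that already has a provider (-EEXIST), then restarts the queue
	 * with the new params installed. */
	return net_mp_open_rxq(dev, qid, &p);
}

static void my_mp_unbind(struct net_device *dev, unsigned int qid)
{
	struct pp_memory_provider_params p = {
		.mp_ops  = &my_mp_ops,
		.mp_priv = my_mp_state,
	};

	/* Must pass the same ops/priv pair that was installed; the helper
	 * warns and bails out on a mismatch. */
	net_mp_close_rxq(dev, qid, &p);
}
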
include/net/page_pool/memory_provider.h
net/core/netdev_rx_queue.c

index 4f0ffb8f6a0a0fbd25b276293e30920bf86a8ddc..b3e66589776758f1b32217a67aca2690366d2193 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -22,6 +22,11 @@ bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
 void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
 void net_mp_niov_clear_page_pool(struct net_iov *niov);
 
+int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+                   struct pp_memory_provider_params *p);
+void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+                     struct pp_memory_provider_params *old_p);
+
 /**
   * net_mp_netmem_place_in_cache() - give a netmem to a page pool
   * @pool:      the page pool to place the netmem into
index db82786fa0c40543404922108feb8c1099ecf6ac..db46880f37ccf493cc8107a11f717068fb6df8f3 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -3,6 +3,7 @@
 #include <linux/netdevice.h>
 #include <net/netdev_queues.h>
 #include <net/netdev_rx_queue.h>
+#include <net/page_pool/memory_provider.h>
 
 #include "page_pool_priv.h"
 
@@ -80,3 +81,71 @@ err_free_new_mem:
        return err;
 }
 EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
+
+static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+                            struct pp_memory_provider_params *p)
+{
+       struct netdev_rx_queue *rxq;
+       int ret;
+
+       if (ifq_idx >= dev->real_num_rx_queues)
+               return -EINVAL;
+       ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
+
+       rxq = __netif_get_rx_queue(dev, ifq_idx);
+       if (rxq->mp_params.mp_ops)
+               return -EEXIST;
+
+       rxq->mp_params = *p;
+       ret = netdev_rx_queue_restart(dev, ifq_idx);
+       if (ret) {
+               rxq->mp_params.mp_ops = NULL;
+               rxq->mp_params.mp_priv = NULL;
+       }
+       return ret;
+}
+
+int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+                   struct pp_memory_provider_params *p)
+{
+       int ret;
+
+       rtnl_lock();
+       ret = __net_mp_open_rxq(dev, ifq_idx, p);
+       rtnl_unlock();
+       return ret;
+}
+
+static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+                             struct pp_memory_provider_params *old_p)
+{
+       struct netdev_rx_queue *rxq;
+
+       if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
+               return;
+
+       rxq = __netif_get_rx_queue(dev, ifq_idx);
+
+       /* Callers holding a netdev ref may get here after we already
+        * went thru shutdown via dev_memory_provider_uninstall().
+        */
+       if (dev->reg_state > NETREG_REGISTERED &&
+           !rxq->mp_params.mp_ops)
+               return;
+
+       if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops ||
+                        rxq->mp_params.mp_priv != old_p->mp_priv))
+               return;
+
+       rxq->mp_params.mp_ops = NULL;
+       rxq->mp_params.mp_priv = NULL;
+       WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
+}
+
+void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+                     struct pp_memory_provider_params *old_p)
+{
+       rtnl_lock();
+       __net_mp_close_rxq(dev, ifq_idx, old_p);
+       rtnl_unlock();
+}
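
Taken together, the open and close paths are intentionally asymmetric:
net_mp_open_rxq() can fail and rolls mp_params back to NULL when the queue
restart fails, while net_mp_close_rxq() returns void, presumably because
teardown callers cannot usefully back out; a params mismatch or a failed
restart on close is therefore reported via WARN_ON rather than an error code.
The reg_state check covers callers that still hold a netdev reference but
arrive after the device has been unregistered and the provider was already
torn down via dev_memory_provider_uninstall().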