net: page_pool: add memory provider helpers
authorPavel Begunkov <asml.silence@gmail.com>
Tue, 4 Feb 2025 21:56:20 +0000 (13:56 -0800)
committerJakub Kicinski <kuba@kernel.org>
Fri, 7 Feb 2025 00:27:31 +0000 (16:27 -0800)
Add helpers for memory providers to interact with page pools.
net_mp_niov_{set,clear}_page_pool() serve to [dis]associate a net_iov
with a page pool. If used, the memory provider is responsible to match
"set" calls with "clear" once a net_iov is not going to be used by a page
pool anymore, changing a page pool, etc.

Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Wei <dw@davidwei.uk>
Link: https://patch.msgid.link/20250204215622.695511-10-dw@davidwei.uk
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/page_pool/memory_provider.h
net/core/page_pool.c

index 36469a7e649f72bd7ebb217cad21b2f3b4d2d39d..4f0ffb8f6a0a0fbd25b276293e30920bf86a8ddc 100644 (file)
@@ -18,4 +18,23 @@ struct memory_provider_ops {
        void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
 };
 
+bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
+void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
+void net_mp_niov_clear_page_pool(struct net_iov *niov);
+
+/**
+ * net_mp_netmem_place_in_cache() - give a netmem to a page pool
+ * @pool:      the page pool to place the netmem into
+ * @netmem:    netmem to give
+ *
+ * Push an accounted netmem into the page pool's allocation cache. The caller
+ * must ensure that there is space in the cache. It should only be called off
+ * the mp_ops->alloc_netmems() path.
+ */
+static inline void net_mp_netmem_place_in_cache(struct page_pool *pool,
+                                               netmem_ref netmem)
+{
+       pool->alloc.cache[pool->alloc.count++] = netmem;
+}
+
 #endif
index d632cf2c91c35b82538607c38cf00e9b57b0d061..686bd4a117d93d86c71da2fdfffc32fcfd7b9e40 100644 (file)
@@ -1197,3 +1197,31 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
        }
 }
 EXPORT_SYMBOL(page_pool_update_nid);
+
+/* Record the DMA address for a niov. Thin wrapper that converts the niov to
+ * its netmem handle and delegates to page_pool_set_dma_addr_netmem(),
+ * propagating its return value unchanged.
+ * NOTE(review): callers appear to treat a nonzero return as failure to store
+ * the address — confirm against the page_pool helper's definition.
+ */
+bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr)
+{
+       return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr);
+}
+
+/* Associate a niov with a page pool: install the pool's pp info on the
+ * niov's netmem and account the niov against the pool's hold counter.
+ * Each call must be balanced by a matching net_mp_niov_clear_page_pool()
+ * once the niov will no longer be used by this page pool.
+ */
+void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov)
+{
+       netmem_ref netmem = net_iov_to_netmem(niov);
+
+       page_pool_set_pp_info(pool, netmem);
+
+       /* Count the niov as held by the pool and emit the matching
+        * tracepoint, mirroring the pool's normal page-acquisition
+        * accounting.
+        */
+       pool->pages_state_hold_cnt++;
+       trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
+}
+
+/* Disassociate a niov from a page pool by clearing the pp info from its
+ * netmem handle; the inverse of net_mp_niov_set_page_pool(). Should only
+ * be used in the ->release_netmem() path.
+ */
+void net_mp_niov_clear_page_pool(struct net_iov *niov)
+{
+       netmem_ref netmem = net_iov_to_netmem(niov);
+
+       page_pool_clear_pp_info(netmem);
+}