xsk: Use pool->dma_pages to check for DMA
author		Kal Conley <kal.conley@dectris.com>
		Sun, 23 Apr 2023 18:01:56 +0000 (20:01 +0200)
committer	Daniel Borkmann <daniel@iogearbox.net>
		Thu, 27 Apr 2023 20:24:51 +0000 (22:24 +0200)

Compare pool->dma_pages instead of pool->dma_pages_cnt to check for an
active DMA mapping. pool->dma_pages needs to be read anyway to access
the map, so this compiles to more efficient code. xp_dma_unmap() now
also sets pool->dma_pages to NULL so that the pointer test stays
correct after the mapping is torn down.
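
As a rough illustration of the codegen argument, here is a minimal
standalone sketch (the struct, helper names, and the 1UL flag mask are
simplified stand-ins, not the actual xsk_buff_pool definitions):
testing the map pointer lets the compiler reuse the load it already
needs for the array access, whereas testing a separate count field
needs an extra load.

#include <stdbool.h>
#include <stddef.h>

struct pool_sketch {
	size_t dma_pages_cnt;
	unsigned long *dma_pages;
};

/* Count-based check: loads dma_pages_cnt for the test, then dma_pages
 * for the indexing.
 */
static bool page_flag_via_cnt(const struct pool_sketch *p, size_t idx)
{
	return p->dma_pages_cnt && (p->dma_pages[idx] & 1UL);
}

/* Pointer-based check: the single dma_pages load serves both the NULL
 * test and the indexing.
 */
static bool page_flag_via_ptr(const struct pool_sketch *p, size_t idx)
{
	return p->dma_pages && (p->dma_pages[idx] & 1UL);
}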

Signed-off-by: Kal Conley <kal.conley@dectris.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20230423180157.93559-1-kal.conley@dectris.com
include/net/xsk_buff_pool.h
net/xdp/xsk_buff_pool.c

diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index d318c769b445499e03fd7f8977d59ef9be887677..a8d7b8a3688a6e11f3b9a8ef52dba8a4c636cda9 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -180,7 +180,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
        if (likely(!cross_pg))
                return false;
 
-       return pool->dma_pages_cnt &&
+       return pool->dma_pages &&
               !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
 }
 
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index b2df1e0f81538aa87cd5ee755307e6f7bf6a8a48..26f6d304451e9e310ab3f120eb5d494d4c05f841 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -350,7 +350,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 {
        struct xsk_dma_map *dma_map;
 
-       if (pool->dma_pages_cnt == 0)
+       if (!pool->dma_pages)
                return;
 
        dma_map = xp_find_dma_map(pool);
@@ -364,6 +364,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
 
        __xp_dma_unmap(dma_map, attrs);
        kvfree(pool->dma_pages);
+       pool->dma_pages = NULL;
        pool->dma_pages_cnt = 0;
        pool->dev = NULL;
 }
@@ -503,7 +504,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
        if (pool->unaligned) {
                xskb = pool->free_heads[--pool->free_heads_cnt];
                xp_init_xskb_addr(xskb, pool, addr);
-               if (pool->dma_pages_cnt)
+               if (pool->dma_pages)
                        xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
        } else {
                xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
@@ -569,7 +570,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
                if (pool->unaligned) {
                        xskb = pool->free_heads[--pool->free_heads_cnt];
                        xp_init_xskb_addr(xskb, pool, addr);
-                       if (pool->dma_pages_cnt)
+                       if (pool->dma_pages)
                                xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
                } else {
                        xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];