net: stmmac: introduce wrapper for struct xdp_buff
author: Song Yoong Siang <yoong.siang.song@intel.com>
Sat, 15 Apr 2023 06:45:01 +0000 (14:45 +0800)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 18 Apr 2023 01:57:26 +0000 (18:57 -0700)
Introduce struct stmmac_xdp_buff as a preparation to support XDP Rx
metadata via kfuncs.

Signed-off-by: Song Yoong Siang <yoong.siang.song@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

index 3d15e1e92e180ebf5687c17344b2101d7416f8da..ac8ccf8517083d8640181555ebc5d11002940473 100644 (file)
@@ -92,6 +92,10 @@ struct stmmac_rx_buffer {
        dma_addr_t sec_addr;
 };
 
+struct stmmac_xdp_buff {
+       struct xdp_buff xdp;
+};
+
 struct stmmac_rx_queue {
        u32 rx_count_frames;
        u32 queue_index;
index d7fcab0570322382599f3627e7a26041cfaf311f..10b9f8912bb2ffa2d9635451e040e54b93954d88 100644 (file)
@@ -5190,7 +5190,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
        enum dma_data_direction dma_dir;
        unsigned int desc_size;
        struct sk_buff *skb = NULL;
-       struct xdp_buff xdp;
+       struct stmmac_xdp_buff ctx;
        int xdp_status = 0;
        int buf_sz;
 
@@ -5311,17 +5311,17 @@ read_again:
                        dma_sync_single_for_cpu(priv->device, buf->addr,
                                                buf1_len, dma_dir);
 
-                       xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
-                       xdp_prepare_buff(&xdp, page_address(buf->page),
+                       xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+                       xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
                                         buf->page_offset, buf1_len, false);
 
-                       pre_len = xdp.data_end - xdp.data_hard_start -
+                       pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
                                  buf->page_offset;
-                       skb = stmmac_xdp_run_prog(priv, &xdp);
+                       skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
                        /* Due xdp_adjust_tail: DMA sync for_device
                         * cover max len CPU touch
                         */
-                       sync_len = xdp.data_end - xdp.data_hard_start -
+                       sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
                                   buf->page_offset;
                        sync_len = max(sync_len, pre_len);
 
@@ -5331,7 +5331,7 @@ read_again:
 
                                if (xdp_res & STMMAC_XDP_CONSUMED) {
                                        page_pool_put_page(rx_q->page_pool,
-                                                          virt_to_head_page(xdp.data),
+                                                          virt_to_head_page(ctx.xdp.data),
                                                           sync_len, true);
                                        buf->page = NULL;
                                        priv->dev->stats.rx_dropped++;
@@ -5359,7 +5359,7 @@ read_again:
 
                if (!skb) {
                        /* XDP program may expand or reduce tail */
-                       buf1_len = xdp.data_end - xdp.data;
+                       buf1_len = ctx.xdp.data_end - ctx.xdp.data;
 
                        skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
                        if (!skb) {
@@ -5369,7 +5369,7 @@ read_again:
                        }
 
                        /* XDP program may adjust header */
-                       skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+                       skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
                        skb_put(skb, buf1_len);
 
                        /* Data payload copied into SKB, page ready for recycle */