// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "rx.h"
#include "en/xdp.h"
#include <net/xdp_sock_drv.h>

/* RX data path */

static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
                                               u32 cqe_bcnt)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        skb_put_data(skb, data, cqe_bcnt);

        return skb;
}
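
/* Hedged illustration, compiled out, not part of the driver: how a caller is
 * expected to consume the helper above. A NULL return means the failure was
 * already counted in rq->stats->buff_alloc_err, so the caller only has to
 * drop the packet. The sketch's function name and the completion step
 * (napi_gro_receive) are assumptions; the real dispatch lives in the mlx5e
 * RX handlers.
 */
#if 0
static void mlx5e_xsk_construct_skb_usage_sketch(struct mlx5e_rq *rq,
                                                 void *data, u32 cqe_bcnt)
{
        struct sk_buff *skb = mlx5e_xsk_construct_skb(rq, data, cqe_bcnt);

        if (!skb)
                return; /* allocation failure was already counted */

        napi_gro_receive(rq->cq.napi, skb); /* assumed completion step */
}
#endif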

struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    struct mlx5e_mpw_info *wi,
                                                    u16 cqe_bcnt,
                                                    u32 head_offset,
                                                    u32 page_idx)
{
        struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
        u32 cqe_bcnt32 = cqe_bcnt;
        bool consumed;

        /* Check packet size. Note LRO doesn't use linear SKB */
        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
                rq->stats->oversize_pkts_sw_drop++;
                return NULL;
        }

        /* head_offset is not used in this function, because xdp->data and the
         * DMA address point directly to the necessary place. Furthermore, in
         * the current implementation, UMR pages are mapped to XSK frames, so
         * head_offset should always be 0.
         */
        WARN_ON_ONCE(head_offset);

        xdp->data_end = xdp->data + cqe_bcnt32;
        xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp);
        net_prefetch(xdp->data);

        rcu_read_lock();
        consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
        rcu_read_unlock();

        /* Possible flows:
         * - XDP_REDIRECT to XSKMAP:
         *   The page is owned by userspace from now on.
         * - XDP_TX and other XDP_REDIRECTs:
         *   The page was returned by ZCA and recycled.
         * - XDP_DROP:
         *   Recycle the page.
         * - XDP_PASS:
         *   Allocate an SKB, copy the data and recycle the page.
         *
         * Pages to be recycled go to the Reuse Ring on MPWQE deallocation. Its
         * size is the same as the Driver RX Ring's size, and pages for WQEs
         * are allocated first from the Reuse Ring, so it has enough space.
         * (A hedged sketch of the deallocation side follows this function.)
         */

        if (likely(consumed)) {
                if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                return NULL; /* page/packet was consumed by XDP */
        }

        /* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
         * frame. On SKB allocation failure, NULL is returned.
         */
        return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt32);
}
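
/* Hedged sketch, compiled out: the deallocation side of the xdp_xmit_bitmap
 * protocol used above. The function name and the recycle step are
 * assumptions; the real logic lives in the MPWQE deallocation path. Pages
 * whose bit is set were handed to XDP_TX/XDP_REDIRECT and are released only
 * on XDP-TX completion; the rest go back to the Reuse Ring. Both sides run
 * in the same NAPI context, which is why the non-atomic __set_bit and
 * __test_and_clear_bit above are sufficient.
 */
#if 0
static void mlx5e_xsk_mpwqe_dealloc_sketch(struct mlx5e_rq *rq,
                                           struct mlx5e_mpw_info *wi,
                                           int pages_per_wqe)
{
        int i;

        for (i = 0; i < pages_per_wqe; i++) {
                if (test_bit(i, wi->xdp_xmit_bitmap))
                        continue; /* still owned by the XDP TX path */
                /* recycle wi->umr.dma_info[i] into the Reuse Ring */
        }
}
#endif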

struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                                              struct mlx5_cqe64 *cqe,
                                              struct mlx5e_wqe_frag_info *wi,
                                              u32 cqe_bcnt)
{
        struct xdp_buff *xdp = wi->di->xsk;
        bool consumed;

        /* wi->offset is not used in this function, because xdp->data and the
         * DMA address point directly to the necessary place. Furthermore, the
         * XSK allocator allocates frames per packet, instead of pages, so
         * wi->offset should always be 0.
         */
        WARN_ON_ONCE(wi->offset);

        xdp->data_end = xdp->data + cqe_bcnt;
        xdp_set_data_meta_invalid(xdp);
        xsk_buff_dma_sync_for_cpu(xdp);
        net_prefetch(xdp->data);

        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }

        rcu_read_lock();
        consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
        rcu_read_unlock();

        if (likely(consumed))
                return NULL; /* page/packet was consumed by XDP */

        /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
         * will be handled by mlx5e_put_rx_frag.
         * On SKB allocation failure, NULL is returned.
         */
        return mlx5e_xsk_construct_skb(rq, xdp->data, cqe_bcnt);
}
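
/* Hedged userspace sketch, compiled out: the consumer end of the
 * "XDP_REDIRECT to XSKMAP" flow documented above. Once redirected, the frame
 * appears on the AF_XDP RX ring and the kernel gives up ownership. The loop
 * uses the libbpf xsk.h helpers of this era; `rx` and `umem_area` are assumed
 * to have been set up elsewhere with xsk_umem__create()/xsk_socket__create().
 */
#if 0
static void af_xdp_rx_sketch(struct xsk_ring_cons *rx, void *umem_area)
{
        __u32 idx = 0;
        size_t i, n;

        n = xsk_ring_cons__peek(rx, 64, &idx);
        for (i = 0; i < n; i++, idx++) {
                const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx);
                void *pkt = xsk_umem__get_data(umem_area, desc->addr);

                /* process desc->len bytes at pkt; userspace owns the frame
                 * until it is posted back on the Fill ring
                 */
                (void)pkt;
        }
        xsk_ring_cons__release(rx, n);
}
#endif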