// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
 * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
 * Returns 0 on success, <0 on failure
static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
	vsi->num_xsk_umems_used = 0;
	vsi->num_xsk_umems = vsi->alloc_queue_pairs;
	vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
	if (!vsi->xsk_umems) {
		vsi->num_xsk_umems = 0;
 * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
 * @umem: UMEM to store
 * @qid: Ring/qid to associate with the UMEM
 * Returns 0 on success, <0 on failure
static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
	err = i40e_alloc_xsk_umems(vsi);
	vsi->xsk_umems[qid] = umem;
	vsi->num_xsk_umems_used++;
 * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
 * @qid: Ring/qid associated with the UMEM
static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
	vsi->xsk_umems[qid] = NULL;
	vsi->num_xsk_umems_used--;
	if (vsi->num_xsk_umems_used == 0) {
		kfree(vsi->xsk_umems);
		vsi->xsk_umems = NULL;
		vsi->num_xsk_umems = 0;
 * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
 * @umem: UMEM to DMA map
 * Returns 0 on success, <0 on failure
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
	struct i40e_pf *pf = vsi->back;
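	/* Map every page backing the UMEM so the device can DMA directly
	 * to and from user-space memory for both Rx and Tx.
	 */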
	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))

		umem->pages[i].dma = dma;

	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
 * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
 * @umem: UMEM to DMA unmap
static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
	struct i40e_pf *pf = vsi->back;

	dev = &pf->pdev->dev;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @qid: Rx ring to associate UMEM to
 * Returns 0 on success, <0 on failure
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *reuseq;

	if (vsi->type != I40E_VSI_MAIN)

	if (qid >= vsi->num_queue_pairs)

	if (vsi->xsk_umems) {
		if (qid >= vsi->num_xsk_umems)
		if (vsi->xsk_umems[qid])

	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
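	/* Swap the newly prepared reuse queue into the UMEM and free
	 * whatever reuse queue was attached before.
	 */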
	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = i40e_xsk_umem_dma_map(vsi, umem);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
	err = i40e_queue_pair_disable(vsi, qid);

	err = i40e_add_xsk_umem(vsi, umem, qid);

	err = i40e_queue_pair_enable(vsi, qid);

	/* Kick start the NAPI context so that receiving will start */
	err = i40e_xsk_async_xmit(vsi->netdev, qid);
 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
 * @qid: Rx ring to disassociate UMEM from
 * Returns 0 on success, <0 on failure
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
	if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
	    !vsi->xsk_umems[qid])

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
	err = i40e_queue_pair_disable(vsi, qid);

	i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
	i40e_remove_xsk_umem(vsi, qid);

	err = i40e_queue_pair_enable(vsi, qid);
 * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
 * @umem: UMEM associated to the ring, if any
 * @qid: Rx ring to query
 * This function will store, if any, the UMEM associated with the given ring in @umem.
 * Returns 0 on success, <0 on failure
int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
	if (vsi->type != I40E_VSI_MAIN)

	if (qid >= vsi->num_queue_pairs)

	if (vsi->xsk_umems) {
		if (qid >= vsi->num_xsk_umems)
		*umem = vsi->xsk_umems[qid];
 * i40e_xsk_umem_setup - Enable/disable a UMEM to/from a ring/qid
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM to/from
 * This function enables or disables a UMEM for a certain ring/qid.
 * Returns 0 on success, <0 on failure
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		      i40e_xsk_umem_disable(vsi, qid);
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @xdp: xdp_buff used as input to the XDP program
 * This function executes the XDP program on the received frame and
 * translates the program's verdict into a driver action.
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;

	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
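	/* Fold the current offset of xdp->data from data_hard_start (the
	 * reserved headroom plus any bpf_xdp_adjust_head() movement made by
	 * the program) into the UMEM handle.
	 */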
	xdp->handle += xdp->data - xdp->data_hard_start;
	xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
	result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
	err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
	result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
	bpf_warn_invalid_xdp_action(act);
	trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	/* fallthrough -- handle aborts by dropping packet */
	result = I40E_XDP_CONSUMED;
 * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
 * @bi: Rx buffer to populate
 * This function allocates an Rx buffer. The buffer can come from the fill
 * queue, or via the recycle queue (next_to_alloc).
 * Returns true for a successful allocation, false otherwise
static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *bi)
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;

	rx_ring->rx_stats.page_reuse_count++;

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->addr = xdp_umem_get_data(umem, handle);
	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
 * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
 * @bi: Rx buffer to populate
 * This function allocates an Rx buffer. The buffer can come from the fill
 * queue, or via the reuse queue.
 * Returns true for a successful allocation, false otherwise
static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi)
	struct xdp_umem *umem = rx_ring->xsk_umem;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
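	/* Handles coming off the reuse queue may still carry a headroom
	 * offset from their previous use, so mask the address back to the
	 * start of the UMEM chunk before rebuilding the buffer.
	 */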
	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->addr = xdp_umem_get_data(umem, handle);
	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
static __always_inline bool
__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
			   bool alloc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi))
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	if (!alloc(rx_ring, bi)) {

	dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,

	rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

	if (unlikely(ntu == rx_ring->count)) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
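	/* Clear the writeback status so a stale DD bit is never mistaken
	 * for a newly completed descriptor.
	 */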
	rx_desc->wb.qword1.status_error_len = 0;

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @count: The number of buffers to allocate
 * This function allocates a number of Rx buffers from the reuse queue
 * or fill ring and places them on the Rx ring.
 * Returns true for a successful allocation, false otherwise
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_slow_zc);
 * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
 * @count: The number of buffers to allocate
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 * Returns true for a successful allocation, false otherwise
static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_zc);
 * i40e_get_rx_buffer_zc - Return the current Rx buffer
 * @size: The size of the rx buffer (read from descriptor)
 * This function returns the current, received Rx buffer, and also
 * does DMA synchronization so the CPU can safely read it.
 * Returns the received Rx buffer
static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
						    const unsigned int size)
	struct i40e_rx_buffer *bi;

	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
 * @old_bi: The Rx buffer to recycle
 * This function recycles a finished Rx buffer, and places it on the
 * recycle queue (next_to_alloc).
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
				    struct i40e_rx_buffer *old_bi)
	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;

	/* update, and store next to alloc */
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
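	/* Mask dma, addr and handle back to the start of the UMEM chunk so
	 * any offset picked up during XDP processing is dropped, then
	 * re-apply the UMEM headroom to the handle.
	 */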
	new_bi->dma = old_bi->dma & mask;
	new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
	new_bi->handle = old_bi->handle & mask;
	new_bi->handle += rx_ring->xsk_umem->headroom;
 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: Zero-copy allocator
 * @handle: Buffer handle
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
	struct i40e_rx_buffer *bi;
	struct i40e_ring *rx_ring;

	rx_ring = container_of(alloc, struct i40e_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_bi[nta];

	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
 * i40e_construct_skb_zc - Create an skbuff from a zero-copy Rx buffer
 * This function allocates a new skb from a zero-copy Rx buffer.
 * Returns the skb, or NULL on failure.
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct i40e_rx_buffer *bi,
					     struct xdp_buff *xdp)
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
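	/* XDP_PASS frames leave the zero-copy path here: the payload is
	 * copied into the newly allocated skb so the UMEM buffer can be
	 * recycled right away.
	 */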
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	skb_metadata_set(skb, metasize);

	i40e_reuse_rx_buffer_zc(rx_ring, bi);
 * i40e_inc_ntc - Advance the next_to_clean index
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @budget: NAPI budget
 * Returns amount of work completed
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *bi;
		union i40e_rx_desc *rx_desc;

		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
				!i40e_alloc_rx_buffers_fast_zc(rx_ring,

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */

		bi = i40e_clean_programming_status(rx_ring, rx_desc,
		i40e_reuse_rx_buffer_zc(rx_ring, bi);

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		bi = i40e_get_rx_buffer_zc(rx_ring, size);
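		/* Build the xdp_buff around the received UMEM buffer:
		 * data_hard_start backs off the XDP_PACKET_HEADROOM reserved
		 * in front of the frame, and the UMEM handle is carried so the
		 * buffer can be completed or recycled later.
		 */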
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
		if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {

		i40e_reuse_rx_buffer_zc(rx_ring, bi);

		total_rx_bytes += size;

		i40e_inc_ntc(rx_ring);
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
		rx_ring->rx_stats.alloc_buff_failed++;

		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))

		total_rx_bytes += skb->len;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
	return failure ? budget : (int)total_rx_packets;
 * i40e_xmit_zc - Performs zero-copy AF_XDP Tx
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 * Returns true if the work is finished.
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;

	while (budget-- > 0) {
		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
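		/* xsk_umem_consume_tx() pulls the next descriptor posted on
		 * the socket's Tx ring and returns its DMA address and length;
		 * when it returns false there is nothing left to send.
		 */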
		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))

		dma_sync_single_for_device(xdp_ring->dev, dma, len,

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	/* Request an interrupt for the last frame and bump tail ptr. */
	tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
					 I40E_TXD_QW1_CMD_SHIFT);
	i40e_xdp_ring_update_tail(xdp_ring);

	xsk_umem_consume_tx_done(xdp_ring->xsk_umem);

	return !!budget && work_done;
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 * Returns true if cleanup/transmission is done.
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;
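	/* The head write-back index may have wrapped past next_to_clean;
	 * unwrap it by adding the ring size before computing how many
	 * frames the hardware has completed.
	 */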
	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;

	if (frames_ready == 0) {
	} else if (frames_ready > budget) {
		completed_frames = budget;
	} else {
		completed_frames = frames_ready;
	}
	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);

		total_bytes += tx_bi->bytecount;

		if (++ntc >= tx_ring->count)

	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	xsk_umem_complete_tx(umem, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

	xmit_done = i40e_xmit_zc(tx_ring, budget);

	return work_done && xmit_done;
 * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * Returns <0 for errors, 0 otherwise.
int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *ring;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))

	if (!i40e_enabled_xdp_vsi(vsi))

	if (queue_id >= vsi->num_queue_pairs)

	if (!vsi->xdp_rings[queue_id]->xsk_umem)

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
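		/* Hand the buffer's UMEM handle back to the fill-queue reuse
		 * list so the address is not lost when the ring is torn down.
		 */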
		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;

	tx_bi = &tx_ring->tx_bi[ntc];

	i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);

	if (ntc >= tx_ring->count)

	xsk_umem_complete_tx(umem, xsk_frames);
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->xsk_umems[i])