/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
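
/* Fill TX BDs for one XDP buffer (head buffer plus any frags), advance the
 * TX producer, and return the software tx_buf entry for the first BD so the
 * caller can record how the completion should be handled.  Returns NULL if
 * DMA mapping of a frag fails.
 */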
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	struct bnxt_sw_tx_bd *tx_buf, *first_buf;
	struct tx_bd *txbd;
	int num_frags = 0;
	u32 flags;
	u16 prod;
	int i;

	if (xdp && xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		num_frags = sinfo->nr_frags;
	}

	/* fill up the first buffer */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	first_buf = tx_buf;
	tx_buf->nr_frags = num_frags;
	if (xdp)
		tx_buf->page = virt_to_head_page(xdp->data);

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) |
		((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT);
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* now let us fill up the frags into the next buffers */
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];
		struct bnxt_sw_tx_bd *frag_tx_buf;
		struct pci_dev *pdev = bp->pdev;
		dma_addr_t frag_mapping;
		int frag_len;

		prod = NEXT_TX(prod);
		txr->tx_prod = prod;

		/* fill the next buffer with this frag */
		frag_tx_buf = &txr->tx_buf_ring[prod];
		frag_tx_buf->page = skb_frag_page(frag);

		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		frag_len = skb_frag_size(frag);
		frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
						frag_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
			return NULL;

		dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);

		flags = frag_len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
		txbd->tx_bd_opaque = prod;
		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

		len = frag_len;
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type = cpu_to_le32((len << TX_BD_LEN_SHIFT) | flags |
						 TX_BD_FLAGS_PACKET_END);
	/* Sync TX BD */
	wmb();
	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	return first_buf;
}
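
/* Queue an XDP_TX packet on the XDP TX ring and remember the RX producer
 * so the completion path can ring the RX doorbell for the recycled buffer.
 */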
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod,
			    struct xdp_buff *xdp)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}
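
/* Queue a redirected xdp_frame.  The frame pointer and its DMA mapping are
 * saved in the tx_buf so the completion handler can unmap the buffer and
 * return the frame.
 */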
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}
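
/* Handle TX completions on an XDP ring: unmap and return redirected frames,
 * recycle XDP_TX pages back to the page pool, and ring the RX doorbell with
 * the producer recorded in the last completed XDP_TX buffer.
 */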
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i, j, frags;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;

			frags = tx_buf->nr_frags;
			for (j = 0; j < frags; j++) {
				tx_cons = NEXT_TX(tx_cons);
				tx_buf = &txr->tx_buf_ring[tx_cons];
				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
			}
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}
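
/* Report whether an XDP program is currently attached to this RX ring. */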
bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);

	return !!xdp_prog;
}
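
/* Initialize an xdp_buff around the RX buffer at @cons and sync the packet
 * data for CPU access before the XDP program runs.
 */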
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			u16 cons, u8 **data_ptr, unsigned int *len,
			struct xdp_buff *xdp)
{
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 offset;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq);
	xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
}
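
/* Return all frag pages of a multi-buffer xdp_buff to the page pool. */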
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	if (!xdp || !xdp_buff_has_frags(xdp))
		return;
	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 tx_needed = 1;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	offset = bp->rx_offset;

	txr = rxr->bnapi->tx_ring;
	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data)
		offset = xdp.data - xdp.data_hard_start;

	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		*event = 0;

		if (unlikely(xdp_buff_has_frags(&xdp))) {
			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);

			tx_needed += sinfo->nr_frags;
			*event = BNXT_AGG_EVENT;
		}

		if (tx_avail < tx_needed) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);

		*event |= BNXT_TX_EVENT;
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod), &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		bnxt_xdp_buff_frags_free(rxr, &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
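
/* .ndo_xdp_xmit handler: map and transmit a batch of redirected XDP frames
 * on the per-CPU XDP TX ring, returning the number of frames queued.
 */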
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int nxmit = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];
	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
		return -EINVAL;

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!bnxt_tx_avail(bp, txr))
			break;
		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, mapping))
			break;
		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);

	return nxmit;
}

/* Under rtnl_lock */
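/* Install or remove an XDP program: validate MTU and ring constraints,
 * reconfigure the TX/RX ring counts, and restart the NIC if it is running.
 */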
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && !prog->aux->xdp_has_frags &&
	    bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
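
/* .ndo_bpf handler: dispatch XDP setup commands to bnxt_xdp_set(). */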
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}
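
/* Attach the frags of a multi-buffer xdp_buff to the skb built from the
 * head buffer and set the checksum status from the RX completion record.
 */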
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
		   struct page_pool *pool, struct xdp_buff *xdp,
		   struct rx_cmp_ext *rxcmp1)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!skb)
		return NULL;
	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (bp->dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	}
	xdp_update_skb_shared_info(skb, num_frags,
				   sinfo->xdp_frags_size,
				   PAGE_SIZE * sinfo->nr_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));
	return skb;
}