1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qede NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/skbuff.h>
10 #include <linux/bpf_trace.h>
11 #include <net/udp_tunnel.h>
16 #include <linux/if_ether.h>
17 #include <linux/if_vlan.h>
18 #include <net/ip6_checksum.h>
21 #include <linux/qed/qed_if.h>
23 /*********************************
24 * Content also used by slowpath *
25 *********************************/
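/* Allocate and DMA-map a page for the Rx ring, record it in the SW ring
 * shadow, and publish its address in a new Rx BD. With @allow_lazy the
 * allocation may be deferred to the end of the NAPI run as long as enough
 * filled buffers remain to guarantee another Rx interrupt.
 */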
27 int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
29 struct sw_rx_data *sw_rx_data;
30 struct eth_rx_bd *rx_bd;
34 /* In case lazy-allocation is allowed, postpone allocation until the
35 * end of the NAPI run. We'd still need to make sure the Rx ring has
36 * sufficient buffers to guarantee an additional Rx interrupt.
38 if (allow_lazy && likely(rxq->filled_buffers > 12)) {
39 rxq->filled_buffers--;
43 data = alloc_pages(GFP_ATOMIC, 0);
/* Map the entire page, as it will be split into
 * multiple RX buffer segments.
 */
50 mapping = dma_map_page(rxq->dev, data, 0,
51 PAGE_SIZE, rxq->data_direction);
52 if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
57 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
58 sw_rx_data->page_offset = 0;
59 sw_rx_data->data = data;
60 sw_rx_data->mapping = mapping;
62 /* Advance PROD and get BD pointer */
63 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
65 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
			     rxq->rx_headroom);
70 rxq->filled_buffers++;
75 /* Unmap the data and free skb */
76 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
78 u16 idx = txq->sw_tx_cons;
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
80 struct eth_tx_1st_bd *first_bd;
81 struct eth_tx_bd *tx_data_bd;
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
85 int i, split_bd_len = 0;
89 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
90 idx, txq->sw_tx_cons, txq->sw_tx_prod);
96 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
100 nbds = first_bd->data.nbds;
103 struct eth_tx_bd *split = (struct eth_tx_bd *)
104 qed_chain_consume(&txq->tx_pbl);
105 split_bd_len = BD_UNMAP_LEN(split);
108 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
109 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
111 /* Unmap the data of the skb frags */
112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
113 tx_data_bd = (struct eth_tx_bd *)
114 qed_chain_consume(&txq->tx_pbl);
115 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
116 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
119 while (bds_consumed++ < nbds)
120 qed_chain_consume(&txq->tx_pbl);
123 dev_kfree_skb_any(skb);
124 txq->sw_tx_ring.skbs[idx].skb = NULL;
125 txq->sw_tx_ring.skbs[idx].flags = 0;
130 /* Unmap the data and free skb when mapping failed during start_xmit */
131 static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
132 struct eth_tx_1st_bd *first_bd,
133 int nbd, bool data_split)
135 u16 idx = txq->sw_tx_prod;
136 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
137 struct eth_tx_bd *tx_data_bd;
138 int i, split_bd_len = 0;
140 /* Return prod to its position before this skb was handled */
141 qed_chain_set_prod(&txq->tx_pbl,
142 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
144 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
147 struct eth_tx_bd *split = (struct eth_tx_bd *)
148 qed_chain_produce(&txq->tx_pbl);
149 split_bd_len = BD_UNMAP_LEN(split);
153 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
154 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
156 /* Unmap the data of the skb frags */
157 for (i = 0; i < nbd; i++) {
158 tx_data_bd = (struct eth_tx_bd *)
159 qed_chain_produce(&txq->tx_pbl);
160 if (tx_data_bd->nbytes)
161 dma_unmap_page(txq->dev,
162 BD_UNMAP_ADDR(tx_data_bd),
163 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
/* Return the prod to its position before this skb was handled */
167 qed_chain_set_prod(&txq->tx_pbl,
168 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
171 dev_kfree_skb_any(skb);
172 txq->sw_tx_ring.skbs[idx].skb = NULL;
173 txq->sw_tx_ring.skbs[idx].flags = 0;
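/* Classify the skb's offload requirements: returns a mask of XMIT_* flags
 * (checksum, LSO, tunnel encapsulation) and flags IPv6 packets carrying
 * extension headers via @ipv6_ext.
 */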
176 static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
178 u32 rc = XMIT_L4_CSUM;
181 if (skb->ip_summed != CHECKSUM_PARTIAL)
184 l3_proto = vlan_get_protocol(skb);
185 if (l3_proto == htons(ETH_P_IPV6) &&
186 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
189 if (skb->encapsulation) {
191 if (skb_is_gso(skb)) {
192 unsigned short gso_type = skb_shinfo(skb)->gso_type;
194 if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
195 (gso_type & SKB_GSO_GRE_CSUM))
196 rc |= XMIT_ENC_GSO_L4_CSUM;
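/* Fill the 2nd/3rd Tx BDs with the L4 header offset, pseudo-checksum mode
 * and TCP header length needed when offloading IPv6 packets that carry
 * extension headers.
 */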
209 static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
210 struct eth_tx_2nd_bd *second_bd,
211 struct eth_tx_3rd_bd *third_bd)
214 u16 bd2_bits1 = 0, bd2_bits2 = 0;
216 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
218 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
219 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
220 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
222 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
223 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
225 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
226 l4_proto = ipv6_hdr(skb)->nexthdr;
228 l4_proto = ip_hdr(skb)->protocol;
230 if (l4_proto == IPPROTO_UDP)
231 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
234 third_bd->data.bitfields |=
235 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
236 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
237 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
239 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
240 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
243 static int map_frag_to_bd(struct qede_tx_queue *txq,
244 skb_frag_t *frag, struct eth_tx_bd *bd)
248 /* Map skb non-linear frag data for DMA */
249 mapping = skb_frag_dma_map(txq->dev, frag, 0,
250 skb_frag_size(frag), DMA_TO_DEVICE);
251 if (unlikely(dma_mapping_error(txq->dev, mapping)))
254 /* Setup the data pointer of the frag data */
255 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
260 static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
263 return (skb_inner_transport_header(skb) +
264 inner_tcp_hdrlen(skb) - skb->data);
266 return (skb_transport_header(skb) +
267 tcp_hdrlen(skb) - skb->data);
270 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
271 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
272 static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
274 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
276 if (xmit_type & XMIT_LSO) {
279 hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
281 /* linear payload would require its own BD */
282 if (skb_headlen(skb) > hlen)
286 return (skb_shinfo(skb)->nr_frags > allowed_frags);
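/* Ring the Tx doorbell: make the BD writes visible to the device and then
 * write the cached doorbell value (txq->tx_db.raw) to the queue's doorbell
 * address.
 */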
290 static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
292 /* wmb makes sure that the BDs data is updated before updating the
293 * producer, otherwise FW may read old data from the BDs.
297 writel(txq->tx_db.raw, txq->doorbell_addr);
/* Fence required to flush the write-combined buffer, since another
 * CPU may write to the same doorbell address and data may be lost
 * due to the relaxed-order nature of the write-combined BAR.
 */
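/* Post a single XDP frame (or page) on the XDP Tx ring: check for ring
 * space, reserve a first BD, set its length and mapping, and record the
 * frame in the shadow ring so it can be unmapped and freed on completion.
 */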
306 static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
307 u16 len, struct page *page, struct xdp_frame *xdpf)
309 struct eth_tx_1st_bd *bd;
310 struct sw_tx_xdp *xdp;
313 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
314 txq->num_tx_buffers)) {
319 bd = qed_chain_produce(&txq->tx_pbl);
321 bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
323 val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
324 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
326 bd->data.bitfields = cpu_to_le16(val);
328 /* We can safely ignore the offset, as it's 0 for XDP */
329 BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
331 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
336 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
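/* .ndo_xdp_xmit callback - map and queue a batch of XDP frames on one of
 * the XDP Tx queues (selected per CPU), ringing the doorbell when
 * XDP_XMIT_FLUSH is set.
 */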
341 int qede_xdp_transmit(struct net_device *dev, int n_frames,
342 struct xdp_frame **frames, u32 flags)
344 struct qede_dev *edev = netdev_priv(dev);
345 struct device *dmadev = &edev->pdev->dev;
346 struct qede_tx_queue *xdp_tx;
347 struct xdp_frame *xdpf;
352 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
355 if (unlikely(!netif_running(dev)))
358 i = smp_processor_id() % edev->total_xdp_queues;
359 xdp_tx = edev->fp_array[i].xdp_tx;
361 spin_lock(&xdp_tx->xdp_tx_lock);
363 for (i = 0; i < n_frames; i++) {
366 mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
368 if (unlikely(dma_mapping_error(dmadev, mapping)))
371 if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
377 if (flags & XDP_XMIT_FLUSH) {
378 xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
380 xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
381 qede_update_tx_producer(xdp_tx);
384 spin_unlock(&xdp_tx->xdp_tx_lock);
389 int qede_txq_has_work(struct qede_tx_queue *txq)
393 /* Tell compiler that consumer and producer can change */
395 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
396 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
399 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
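/* Reclaim completed XDP Tx descriptors: unmap each completed frame or page,
 * return or free it, and advance the software consumer.
 */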
402 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
404 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
405 struct device *dev = &edev->pdev->dev;
406 struct xdp_frame *xdpf;
409 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
412 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
413 xdp_info = xdp_arr + txq->sw_tx_cons;
414 xdpf = xdp_info->xdpf;
417 dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
419 xdp_return_frame(xdpf);
421 xdp_info->xdpf = NULL;
423 dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
425 __free_page(xdp_info->page);
428 qed_chain_consume(&txq->tx_pbl);
429 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
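/* Tx completion handler: free completed skbs, update BQL accounting, and
 * re-wake the netdev queue if it was stopped and enough ring space is
 * available again.
 */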
434 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
436 unsigned int pkts_compl = 0, bytes_compl = 0;
437 struct netdev_queue *netdev_txq;
441 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
443 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
446 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
449 rc = qede_free_tx_pkt(edev, txq, &len);
451 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
453 qed_chain_get_cons_idx(&txq->tx_pbl));
459 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
463 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
465 /* Need to make the tx_bd_cons update visible to start_xmit()
466 * before checking for netif_tx_queue_stopped(). Without the
467 * memory barrier, there is a small possibility that
468 * start_xmit() will miss it and cause the queue to be stopped
470 * On the other hand we need an rmb() here to ensure the proper
471 * ordering of bit testing in the following
472 * netif_tx_queue_stopped(txq) call.
476 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
477 /* Taking tx_lock is needed to prevent reenabling the queue
* while it's empty. This could have happened if rx_action() gets
479 * suspended in qede_tx_int() after the condition before
480 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
482 * stops the queue->sees fresh tx_bd_cons->releases the queue->
483 * sends some packets consuming the whole queue again->
487 __netif_tx_lock(netdev_txq, smp_processor_id());
489 if ((netif_tx_queue_stopped(netdev_txq)) &&
490 (edev->state == QEDE_STATE_OPEN) &&
491 (qed_chain_get_elem_left(&txq->tx_pbl)
492 >= (MAX_SKB_FRAGS + 1))) {
493 netif_tx_wake_queue(netdev_txq);
494 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
495 "Wake queue was called\n");
498 __netif_tx_unlock(netdev_txq);
504 bool qede_has_rx_work(struct qede_rx_queue *rxq)
506 u16 hw_comp_cons, sw_comp_cons;
508 /* Tell compiler that status block fields can change */
511 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
512 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
514 return hw_comp_cons != sw_comp_cons;
517 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
519 qed_chain_consume(&rxq->rx_bd_ring);
/* This function reuses the buffer (from an offset) from the
 * consumer index to the producer index in the BD ring.
 */
526 static inline void qede_reuse_page(struct qede_rx_queue *rxq,
527 struct sw_rx_data *curr_cons)
529 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
530 struct sw_rx_data *curr_prod;
531 dma_addr_t new_mapping;
533 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
534 *curr_prod = *curr_cons;
536 new_mapping = curr_prod->mapping + curr_prod->page_offset;
538 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
				  rxq->rx_headroom);
543 curr_cons->data = NULL;
/* In case of allocation failures, reuse buffers from the consumer
 * index to produce buffers for the firmware.
 */
549 void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
551 struct sw_rx_data *curr_cons;
553 for (; count > 0; count--) {
554 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
555 qede_reuse_page(rxq, curr_cons);
556 qede_rx_bd_ring_consume(rxq);
560 static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
561 struct sw_rx_data *curr_cons)
563 /* Move to the next segment in the page */
564 curr_cons->page_offset += rxq->rx_buf_seg_size;
566 if (curr_cons->page_offset == PAGE_SIZE) {
567 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
/* Since we failed to allocate a new buffer, the
 * current buffer can be used again.
 */
571 curr_cons->page_offset -= rxq->rx_buf_seg_size;
576 dma_unmap_page(rxq->dev, curr_cons->mapping,
577 PAGE_SIZE, rxq->data_direction);
/* Increment the refcount of the page, as we don't want the
 * network stack to take ownership of a page which the driver
 * can recycle multiple times.
 */
583 page_ref_inc(curr_cons->data);
584 qede_reuse_page(rxq, curr_cons);
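/* Publish the current Rx BD and CQE producer values to the device's
 * internal RAM so the FW can post new completions.
 */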
590 void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
592 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
593 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
594 struct eth_rx_prod_data rx_prods = {0};
596 /* Update producers */
597 rx_prods.bd_prod = cpu_to_le16(bd_prod);
598 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
600 /* Make sure that the BD and SGE data is updated before updating the
601 * producers since FW might read the BD/SGE right after the producer
606 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
610 static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
612 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
613 enum rss_hash_type htype;
616 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
618 hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
619 (htype == RSS_HASH_TYPE_IPV6)) ?
620 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
621 hash = le32_to_cpu(rss_hash);
623 skb_set_hash(skb, hash, hash_type);
626 static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
628 skb_checksum_none_assert(skb);
630 if (csum_flag & QEDE_CSUM_UNNECESSARY)
631 skb->ip_summed = CHECKSUM_UNNECESSARY;
633 if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
635 skb->encapsulation = 1;
639 static inline void qede_skb_receive(struct qede_dev *edev,
640 struct qede_fastpath *fp,
641 struct qede_rx_queue *rxq,
642 struct sk_buff *skb, u16 vlan_tag)
645 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
647 napi_gro_receive(&fp->napi, skb);
650 static void qede_set_gro_params(struct qede_dev *edev,
652 struct eth_fast_path_rx_tpa_start_cqe *cqe)
654 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
656 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
657 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
658 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
660 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
662 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
666 static int qede_fill_frag_skb(struct qede_dev *edev,
667 struct qede_rx_queue *rxq,
668 u8 tpa_agg_index, u16 len_on_bd)
670 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
672 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
673 struct sk_buff *skb = tpa_info->skb;
675 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
678 /* Add one frag and update the appropriate fields in the skb */
679 skb_fill_page_desc(skb, tpa_info->frag_id++,
681 current_bd->page_offset + rxq->rx_headroom,
684 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
685 /* Incr page ref count to reuse on allocation failure
686 * so that it doesn't get freed while freeing SKB.
688 page_ref_inc(current_bd->data);
692 qede_rx_bd_ring_consume(rxq);
694 skb->data_len += len_on_bd;
695 skb->truesize += rxq->rx_buf_seg_size;
696 skb->len += len_on_bd;
701 tpa_info->state = QEDE_AGG_STATE_ERROR;
702 qede_recycle_rx_bd_ring(rxq, 1);
707 static bool qede_tunn_exist(u16 flag)
709 return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
710 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
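/* Validate the checksum indications of a tunnelled packet: returns
 * QEDE_CSUM_ERROR on header errors, otherwise a combination of
 * QEDE_CSUM_UNNECESSARY and QEDE_TUNN_CSUM_UNNECESSARY.
 */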
713 static u8 qede_check_tunn_csum(u16 flag)
718 if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
719 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
720 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
721 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
723 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
724 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
725 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
726 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
727 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
730 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
731 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
732 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
733 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
735 if (csum_flag & flag)
736 return QEDE_CSUM_ERROR;
738 return QEDE_CSUM_UNNECESSARY | tcsum;
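/* Build an skb directly around the current page segment (no copy) and
 * reserve the placement pad.
 */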
741 static inline struct sk_buff *
742 qede_build_skb(struct qede_rx_queue *rxq,
743 struct sw_rx_data *bd, u16 len, u16 pad)
748 buf = page_address(bd->data) + bd->page_offset;
749 skb = build_skb(buf, rxq->rx_buf_seg_size);
751 skb_reserve(skb, pad);
757 static struct sk_buff *
758 qede_tpa_rx_build_skb(struct qede_dev *edev,
759 struct qede_rx_queue *rxq,
760 struct sw_rx_data *bd, u16 len, u16 pad,
765 skb = qede_build_skb(rxq, bd, len, pad);
766 bd->page_offset += rxq->rx_buf_seg_size;
768 if (bd->page_offset == PAGE_SIZE) {
769 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
771 "Failed to allocate RX buffer for tpa start\n");
772 bd->page_offset -= rxq->rx_buf_seg_size;
773 page_ref_inc(bd->data);
774 dev_kfree_skb_any(skb);
778 page_ref_inc(bd->data);
779 qede_reuse_page(rxq, bd);
782 /* We've consumed the first BD and prepared an SKB */
783 qede_rx_bd_ring_consume(rxq);
788 static struct sk_buff *
789 qede_rx_build_skb(struct qede_dev *edev,
790 struct qede_rx_queue *rxq,
791 struct sw_rx_data *bd, u16 len, u16 pad)
793 struct sk_buff *skb = NULL;
/* For smaller frames we still need to allocate an skb and memcpy the
 * data, benefiting from reuse of the page segment instead of
 * un-mapping it.
 */
799 if ((len + pad <= edev->rx_copybreak)) {
800 unsigned int offset = bd->page_offset + pad;
802 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
806 skb_reserve(skb, pad);
807 skb_put_data(skb, page_address(bd->data) + offset, len);
808 qede_reuse_page(rxq, bd);
812 skb = qede_build_skb(rxq, bd, len, pad);
814 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
815 /* Incr page ref count to reuse on allocation failure so
816 * that it doesn't get freed while freeing SKB [as its
817 * already mapped there].
819 page_ref_inc(bd->data);
820 dev_kfree_skb_any(skb);
824 /* We've consumed the first BD and prepared an SKB */
825 qede_rx_bd_ring_consume(rxq);
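/* Handle a TPA (aggregation) start CQE: build the skb for the first BD,
 * save the buffer state for the aggregation, set GRO parameters and
 * consume any additional buffer lengths listed in the CQE.
 */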
830 static void qede_tpa_start(struct qede_dev *edev,
831 struct qede_rx_queue *rxq,
832 struct eth_fast_path_rx_tpa_start_cqe *cqe)
834 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
835 struct sw_rx_data *sw_rx_data_cons;
838 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
839 pad = cqe->placement_offset + rxq->rx_headroom;
841 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
842 le16_to_cpu(cqe->len_on_first_bd),
844 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
845 tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
847 if (unlikely(!tpa_info->skb)) {
848 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
/* Consume from ring but do not produce, since
 * this might still be used by the FW; it will be
 * re-used at a later time.
 */
854 tpa_info->tpa_start_fail = true;
855 qede_rx_bd_ring_consume(rxq);
856 tpa_info->state = QEDE_AGG_STATE_ERROR;
860 tpa_info->frag_id = 0;
861 tpa_info->state = QEDE_AGG_STATE_START;
863 if ((le16_to_cpu(cqe->pars_flags.flags) >>
864 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
865 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
866 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
868 tpa_info->vlan_tag = 0;
870 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
872 /* This is needed in order to enable forwarding support */
873 qede_set_gro_params(edev, tpa_info->skb, cqe);
875 cons_buf: /* We still need to handle bd_len_list to consume buffers */
876 if (likely(cqe->bw_ext_bd_len_list[0]))
877 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
878 le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
880 if (unlikely(cqe->bw_ext_bd_len_list[1])) {
882 "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
883 tpa_info->state = QEDE_AGG_STATE_ERROR;
888 static void qede_gro_ip_csum(struct sk_buff *skb)
890 const struct iphdr *iph = ip_hdr(skb);
893 skb_set_transport_header(skb, sizeof(struct iphdr));
896 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
897 iph->saddr, iph->daddr, 0);
899 tcp_gro_complete(skb);
902 static void qede_gro_ipv6_csum(struct sk_buff *skb)
904 struct ipv6hdr *iph = ipv6_hdr(skb);
907 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
910 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
911 &iph->saddr, &iph->daddr, 0);
912 tcp_gro_complete(skb);
916 static void qede_gro_receive(struct qede_dev *edev,
917 struct qede_fastpath *fp,
/* The FW can send a single MTU-sized packet from the GRO flow
 * due to aggregation timeout, last segment, etc., which is not
 * expected to be a GRO packet. If the skb has zero frags, simply
 * push it up the stack as a non-GSO skb.
 */
926 if (unlikely(!skb->data_len)) {
927 skb_shinfo(skb)->gso_type = 0;
928 skb_shinfo(skb)->gso_size = 0;
933 if (skb_shinfo(skb)->gso_size) {
934 skb_reset_network_header(skb);
936 switch (skb->protocol) {
937 case htons(ETH_P_IP):
938 qede_gro_ip_csum(skb);
940 case htons(ETH_P_IPV6):
941 qede_gro_ipv6_csum(skb);
945 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
946 ntohs(skb->protocol));
952 skb_record_rx_queue(skb, fp->rxq->rxq_id);
953 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
956 static inline void qede_tpa_cont(struct qede_dev *edev,
957 struct qede_rx_queue *rxq,
958 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
962 for (i = 0; cqe->len_list[i]; i++)
963 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
964 le16_to_cpu(cqe->len_list[i]));
968 "Strange - TPA cont with more than a single len_list entry\n");
971 static int qede_tpa_end(struct qede_dev *edev,
972 struct qede_fastpath *fp,
973 struct eth_fast_path_rx_tpa_end_cqe *cqe)
975 struct qede_rx_queue *rxq = fp->rxq;
976 struct qede_agg_info *tpa_info;
980 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
983 if (tpa_info->buffer.page_offset == PAGE_SIZE)
984 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
985 PAGE_SIZE, rxq->data_direction);
987 for (i = 0; cqe->len_list[i]; i++)
988 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
989 le16_to_cpu(cqe->len_list[i]));
992 "Strange - TPA emd with more than a single len_list entry\n");
994 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
998 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
1000 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1001 cqe->num_of_bds, tpa_info->frag_id);
1002 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
1004 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1005 le16_to_cpu(cqe->total_packet_len), skb->len);
1007 /* Finalize the SKB */
1008 skb->protocol = eth_type_trans(skb, edev->ndev);
1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
1011 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1012 * to skb_shinfo(skb)->gso_segs
1014 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
1016 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1018 tpa_info->state = QEDE_AGG_STATE_NONE;
1022 tpa_info->state = QEDE_AGG_STATE_NONE;
1024 if (tpa_info->tpa_start_fail) {
1025 qede_reuse_page(rxq, &tpa_info->buffer);
1026 tpa_info->tpa_start_fail = false;
1029 dev_kfree_skb_any(tpa_info->skb);
1030 tpa_info->skb = NULL;
1034 static u8 qede_check_notunn_csum(u16 flag)
1039 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1040 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
1041 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1042 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1043 csum = QEDE_CSUM_UNNECESSARY;
1046 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1047 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1049 if (csum_flag & flag)
1050 return QEDE_CSUM_ERROR;
1055 static u8 qede_check_csum(u16 flag)
1057 if (!qede_tunn_exist(flag))
1058 return qede_check_notunn_csum(flag);
1060 return qede_check_tunn_csum(flag);
1063 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
1066 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
1068 if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
1069 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
1070 (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1071 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
1077 /* Return true iff packet is to be passed to stack */
1078 static bool qede_rx_xdp(struct qede_dev *edev,
1079 struct qede_fastpath *fp,
1080 struct qede_rx_queue *rxq,
1081 struct bpf_prog *prog,
1082 struct sw_rx_data *bd,
1083 struct eth_fast_path_rx_reg_cqe *cqe,
1084 u16 *data_offset, u16 *len)
1086 struct xdp_buff xdp;
1087 enum xdp_action act;
1089 xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
1090 xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
1093 act = bpf_prog_run_xdp(prog, &xdp);
1095 /* Recalculate, as XDP might have changed the headers */
1096 *data_offset = xdp.data - xdp.data_hard_start;
1097 *len = xdp.data_end - xdp.data;
1099 if (act == XDP_PASS)
1102 /* Count number of packets not to be passed to stack */
1107 /* We need the replacement buffer before transmit. */
1108 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1109 qede_recycle_rx_bd_ring(rxq, 1);
1111 trace_xdp_exception(edev->ndev, prog, act);
/* Now, if there's a transmission problem, we'd still have to
 * throw away the current buffer, as the replacement was already
 * allocated.
 */
1118 if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
1119 *data_offset, *len, bd->data,
1121 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1122 rxq->data_direction);
1123 __free_page(bd->data);
1125 trace_xdp_exception(edev->ndev, prog, act);
1127 dma_sync_single_for_device(rxq->dev,
1128 bd->mapping + *data_offset,
1129 *len, rxq->data_direction);
1130 fp->xdp_xmit |= QEDE_XDP_TX;
1133 /* Regardless, we've consumed an Rx BD */
1134 qede_rx_bd_ring_consume(rxq);
1137 /* We need the replacement buffer before transmit. */
1138 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1139 qede_recycle_rx_bd_ring(rxq, 1);
1141 trace_xdp_exception(edev->ndev, prog, act);
1145 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1146 rxq->data_direction);
1148 if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
1149 DP_NOTICE(edev, "Failed to redirect the packet\n");
1151 fp->xdp_xmit |= QEDE_XDP_REDIRECT;
1153 qede_rx_bd_ring_consume(rxq);
1156 bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
1159 trace_xdp_exception(edev->ndev, prog, act);
1162 qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
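/* Attach the remaining BDs of a multi-buffer (jumbo) packet to the skb as
 * page frags; returns 0 on success, or the number of BDs still left to map
 * on failure.
 */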
1168 static int qede_rx_build_jumbo(struct qede_dev *edev,
1169 struct qede_rx_queue *rxq,
1170 struct sk_buff *skb,
1171 struct eth_fast_path_rx_reg_cqe *cqe,
1174 u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1175 struct sw_rx_data *bd;
1179 pkt_len -= first_bd_len;
1181 /* We've already used one BD for the SKB. Now take care of the rest */
1182 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1183 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1186 if (unlikely(!cur_size)) {
1188 "Still got %d BDs for mapping jumbo, but length became 0\n",
1193 /* We need a replacement buffer for each BD */
1194 if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1197 /* Now that we've allocated the replacement buffer,
1198 * we can safely consume the next BD and map it to the SKB.
1200 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1201 bd = &rxq->sw_rx_ring[bd_cons_idx];
1202 qede_rx_bd_ring_consume(rxq);
1204 dma_unmap_page(rxq->dev, bd->mapping,
1205 PAGE_SIZE, DMA_FROM_DEVICE);
1207 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
1208 rxq->rx_headroom, cur_size, PAGE_SIZE);
1210 pkt_len -= cur_size;
1213 if (unlikely(pkt_len))
1215 "Mapped all BDs of jumbo, but still have %d bytes\n",
1222 static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1223 struct qede_fastpath *fp,
1224 struct qede_rx_queue *rxq,
1225 union eth_rx_cqe *cqe,
1226 enum eth_rx_cqe_type type)
1229 case ETH_RX_CQE_TYPE_TPA_START:
1230 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1232 case ETH_RX_CQE_TYPE_TPA_CONT:
1233 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1235 case ETH_RX_CQE_TYPE_TPA_END:
1236 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
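/* Process a single Rx CQE: handle slowpath and TPA completions, run the
 * XDP program if one is attached, validate checksums, build the skb
 * (including jumbo frags) and hand it to the stack. Returns the number of
 * packets passed to the stack (0 or 1).
 */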
1242 static int qede_rx_process_cqe(struct qede_dev *edev,
1243 struct qede_fastpath *fp,
1244 struct qede_rx_queue *rxq)
1246 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1247 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1248 u16 len, pad, bd_cons_idx, parse_flag;
1249 enum eth_rx_cqe_type cqe_type;
1250 union eth_rx_cqe *cqe;
1251 struct sw_rx_data *bd;
1252 struct sk_buff *skb;
1256 /* Get the CQE from the completion ring */
1257 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1258 cqe_type = cqe->fast_path_regular.type;
1260 /* Process an unlikely slowpath event */
1261 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1262 struct eth_slow_path_rx_cqe *sp_cqe;
1264 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1265 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1269 /* Handle TPA cqes */
1270 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1271 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1273 /* Get the data from the SW ring; Consume it only after it's evident
1274 * we wouldn't recycle it.
1276 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1277 bd = &rxq->sw_rx_ring[bd_cons_idx];
1279 fp_cqe = &cqe->fast_path_regular;
1280 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1281 pad = fp_cqe->placement_offset + rxq->rx_headroom;
1283 /* Run eBPF program if one is attached */
1285 if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
1289 /* If this is an error packet then drop it */
1290 flags = cqe->fast_path_regular.pars_flags.flags;
1291 parse_flag = le16_to_cpu(flags);
1293 csum_flag = qede_check_csum(parse_flag);
1294 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1295 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1298 rxq->rx_hw_errors++;
/* Basic validation passed; need to prepare an SKB. This also
 * guarantees that the first BD is finally consumed upon success.
 */
1304 skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
1306 rxq->rx_alloc_errors++;
1307 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
/* In case of a jumbo packet, several PAGE_SIZE'd buffers will be
 * pointed to by a single CQE.
 */
1314 if (fp_cqe->bd_num > 1) {
1315 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1318 if (unlikely(unmapped_frags > 0)) {
1319 qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1320 dev_kfree_skb_any(skb);
1325 /* The SKB contains all the data. Now prepare meta-magic */
1326 skb->protocol = eth_type_trans(skb, edev->ndev);
1327 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1328 qede_set_skb_csum(skb, csum_flag);
1329 skb_record_rx_queue(skb, rxq->rxq_id);
1330 qede_ptp_record_rx_ts(edev, cqe, skb);
1332 /* SKB is prepared - pass it to stack */
1333 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
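/* NAPI Rx handler: process up to @budget CQEs, refill the Rx ring with
 * replacement buffers and update the Rx producers.
 */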
1338 static int qede_rx_int(struct qede_fastpath *fp, int budget)
1340 struct qede_rx_queue *rxq = fp->rxq;
1341 struct qede_dev *edev = fp->edev;
1342 int work_done = 0, rcv_pkts = 0;
1343 u16 hw_comp_cons, sw_comp_cons;
1345 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1346 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
/* Memory barrier to prevent the CPU from doing speculative reads of the
 * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE were
 * read before the FW writes it, and the FW then writes the CQE and SB
 * before the CPU reads hw_comp_cons, the CPU would use an old CQE.
 */
1355 /* Loop to complete all indicated BDs */
1356 while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1357 rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1358 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1359 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1363 rxq->rcv_pkts += rcv_pkts;
1365 /* Allocate replacement buffers */
1366 while (rxq->num_rx_buffers - rxq->filled_buffers)
1367 if (qede_alloc_rx_buffer(rxq, false))
1370 /* Update producers */
1371 qede_update_rx_prod(edev, rxq);
1376 static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1378 qed_sb_update_sb_idx(fp->sb_info);
1380 /* *_has_*_work() reads the status block, thus we need to ensure that
1381 * status block indices have been actually read (qed_sb_update_sb_idx)
1382 * prior to this check (*_has_*_work) so that we won't write the
1383 * "newer" value of the status block to HW (if there was a DMA right
1384 * after qede_has_rx_work and if there is no rmb, the memory reading
1385 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
1386 * In this case there will never be another interrupt until there is
* another update of the status block, while there is still unhandled
* work.
*/
1392 if (likely(fp->type & QEDE_FASTPATH_RX))
1393 if (qede_has_rx_work(fp->rxq))
1396 if (fp->type & QEDE_FASTPATH_XDP)
1397 if (qede_txq_has_work(fp->xdp_tx))
1400 if (likely(fp->type & QEDE_FASTPATH_TX)) {
1403 for_each_cos_in_txq(fp->edev, cos) {
1404 if (qede_txq_has_work(&fp->txq[cos]))
1412 /*********************
1413 * NDO & API related *
1414 *********************/
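/* NAPI poll callback: service Tx/XDP completions and Rx work for this
 * fastpath, re-arm the status block when all work is done, and flush any
 * pending XDP transmissions/redirects.
 */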
1415 int qede_poll(struct napi_struct *napi, int budget)
1417 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1419 struct qede_dev *edev = fp->edev;
1420 int rx_work_done = 0;
1425 if (likely(fp->type & QEDE_FASTPATH_TX)) {
1428 for_each_cos_in_txq(fp->edev, cos) {
1429 if (qede_txq_has_work(&fp->txq[cos]))
1430 qede_tx_int(edev, &fp->txq[cos]);
1434 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1435 qede_xdp_tx_int(edev, fp->xdp_tx);
1437 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1438 qede_has_rx_work(fp->rxq)) ?
1439 qede_rx_int(fp, budget) : 0;
1440 /* Handle case where we are called by netpoll with a budget of 0 */
1441 if (rx_work_done < budget || !budget) {
1442 if (!qede_poll_is_more_work(fp)) {
1443 napi_complete_done(napi, rx_work_done);
1445 /* Update and reenable interrupts */
1446 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1448 rx_work_done = budget;
1452 if (fp->xdp_xmit & QEDE_XDP_TX) {
1453 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1455 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1456 qede_update_tx_producer(fp->xdp_tx);
1459 if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
1462 return rx_work_done;
1465 irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1467 struct qede_fastpath *fp = fp_cookie;
1469 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1471 napi_schedule_irqoff(&fp->napi);
1475 /* Main transmit function */
1476 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1478 struct qede_dev *edev = netdev_priv(ndev);
1479 struct netdev_queue *netdev_txq;
1480 struct qede_tx_queue *txq;
1481 struct eth_tx_1st_bd *first_bd;
1482 struct eth_tx_2nd_bd *second_bd = NULL;
1483 struct eth_tx_3rd_bd *third_bd = NULL;
1484 struct eth_tx_bd *tx_data_bd = NULL;
1485 u16 txq_index, val = 0;
1488 int rc, frag_idx = 0, ipv6_ext = 0;
1492 bool data_split = false;
1494 /* Get tx-queue context and netdev index */
1495 txq_index = skb_get_queue_mapping(skb);
1496 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
1497 txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
1498 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1500 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1502 xmit_type = qede_xmit_type(skb, &ipv6_ext);
1504 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1505 if (qede_pkt_req_lin(skb, xmit_type)) {
1506 if (skb_linearize(skb)) {
1507 txq->tx_mem_alloc_err++;
1509 dev_kfree_skb_any(skb);
1510 return NETDEV_TX_OK;
1515 /* Fill the entry in the SW ring and the BDs in the FW ring */
1516 idx = txq->sw_tx_prod;
1517 txq->sw_tx_ring.skbs[idx].skb = skb;
1518 first_bd = (struct eth_tx_1st_bd *)
1519 qed_chain_produce(&txq->tx_pbl);
1520 memset(first_bd, 0, sizeof(*first_bd));
1521 first_bd->data.bd_flags.bitfields =
1522 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1524 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1525 qede_ptp_tx_ts(edev, skb);
1527 /* Map skb linear data for DMA and set in the first BD */
1528 mapping = dma_map_single(txq->dev, skb->data,
1529 skb_headlen(skb), DMA_TO_DEVICE);
1530 if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1531 DP_NOTICE(edev, "SKB mapping failed\n");
1532 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1533 qede_update_tx_producer(txq);
1534 return NETDEV_TX_OK;
1537 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
/* In case there is IPv6 with extension headers or LSO we need the 2nd
 * and 3rd BDs.
 */
1542 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1543 second_bd = (struct eth_tx_2nd_bd *)
1544 qed_chain_produce(&txq->tx_pbl);
1545 memset(second_bd, 0, sizeof(*second_bd));
1548 third_bd = (struct eth_tx_3rd_bd *)
1549 qed_chain_produce(&txq->tx_pbl);
1550 memset(third_bd, 0, sizeof(*third_bd));
1553 /* We need to fill in additional data in second_bd... */
1554 tx_data_bd = (struct eth_tx_bd *)second_bd;
1557 if (skb_vlan_tag_present(skb)) {
1558 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1559 first_bd->data.bd_flags.bitfields |=
1560 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1563 /* Fill the parsing flags & params according to the requested offload */
1564 if (xmit_type & XMIT_L4_CSUM) {
/* We don't re-calculate the IP checksum, as it is already done by
 * the upper stack.
 */
1568 first_bd->data.bd_flags.bitfields |=
1569 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1571 if (xmit_type & XMIT_ENC) {
1572 first_bd->data.bd_flags.bitfields |=
1573 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1575 val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
/* Legacy FW had flipped behavior in regard to this bit -
 * i.e., it needed to be set in order to prevent the FW from touching
 * encapsulated packets when it didn't need to.
 */
1582 if (unlikely(txq->is_legacy))
1583 val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
/* If the packet is IPv6 with extension headers, indicate that
 * to the FW and pass a few params, since the device cracker doesn't
 * support parsing IPv6 with extension header(s).
 */
1589 if (unlikely(ipv6_ext))
1590 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1593 if (xmit_type & XMIT_LSO) {
1594 first_bd->data.bd_flags.bitfields |=
1595 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1596 third_bd->data.lso_mss =
1597 cpu_to_le16(skb_shinfo(skb)->gso_size);
1599 if (unlikely(xmit_type & XMIT_ENC)) {
1600 first_bd->data.bd_flags.bitfields |=
1601 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1603 if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1604 u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1606 first_bd->data.bd_flags.bitfields |= 1 << tmp;
1608 hlen = qede_get_skb_hlen(skb, true);
1610 first_bd->data.bd_flags.bitfields |=
1611 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1612 hlen = qede_get_skb_hlen(skb, false);
1615 /* @@@TBD - if will not be removed need to check */
1616 third_bd->data.bitfields |=
1617 cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1619 /* Make life easier for FW guys who can't deal with header and
1620 * data on same BD. If we need to split, use the second bd...
1622 if (unlikely(skb_headlen(skb) > hlen)) {
1623 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1624 "TSO split header size is %d (%x:%x)\n",
1625 first_bd->nbytes, first_bd->addr.hi,
1628 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1629 le32_to_cpu(first_bd->addr.lo)) +
1632 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1633 le16_to_cpu(first_bd->nbytes) -
1636 /* this marks the BD as one that has no
1637 * individual mapping
1639 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1641 first_bd->nbytes = cpu_to_le16(hlen);
1643 tx_data_bd = (struct eth_tx_bd *)third_bd;
1647 if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
1648 DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
1649 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1650 qede_update_tx_producer(txq);
1651 return NETDEV_TX_OK;
1654 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
1655 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
1658 first_bd->data.bitfields = cpu_to_le16(val);
1660 /* Handle fragmented skb */
/* Special handling for frags inside the 2nd and 3rd BDs... */
1662 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1663 rc = map_frag_to_bd(txq,
1664 &skb_shinfo(skb)->frags[frag_idx],
1667 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1668 qede_update_tx_producer(txq);
1669 return NETDEV_TX_OK;
1672 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1673 tx_data_bd = (struct eth_tx_bd *)third_bd;
1680 /* map last frags into 4th, 5th .... */
1681 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1682 tx_data_bd = (struct eth_tx_bd *)
1683 qed_chain_produce(&txq->tx_pbl);
1685 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1687 rc = map_frag_to_bd(txq,
1688 &skb_shinfo(skb)->frags[frag_idx],
1691 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1692 qede_update_tx_producer(txq);
1693 return NETDEV_TX_OK;
1697 /* update the first BD with the actual num BDs */
1698 first_bd->data.nbds = nbd;
1700 netdev_tx_sent_queue(netdev_txq, skb->len);
1702 skb_tx_timestamp(skb);
1704 /* Advance packet producer only before sending the packet since mapping
1705 * of pages may fail.
1707 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1709 /* 'next page' entries are counted in the producer value */
1710 txq->tx_db.data.bd_prod =
1711 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1713 if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
1714 qede_update_tx_producer(txq);
1716 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1717 < (MAX_SKB_FRAGS + 1))) {
1718 if (netdev_xmit_more())
1719 qede_update_tx_producer(txq);
1721 netif_tx_stop_queue(netdev_txq);
1723 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1724 "Stop queue was called\n");
1725 /* paired memory barrier is in qede_tx_int(), we have to keep
1726 * ordering of set_bit() in netif_tx_stop_queue() and read of
1731 if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1732 (MAX_SKB_FRAGS + 1)) &&
1733 (edev->state == QEDE_STATE_OPEN)) {
1734 netif_tx_wake_queue(netdev_txq);
1735 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1736 "Wake queue was called\n");
1740 return NETDEV_TX_OK;
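/* Select a Tx queue for the skb across all traffic classes; falls back to
 * queue 0 when no TSS queues are configured.
 */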
1743 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1744 struct net_device *sb_dev)
1746 struct qede_dev *edev = netdev_priv(dev);
1749 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
1751 return QEDE_TSS_COUNT(edev) ?
1752 netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
1755 /* 8B udp header + 8B base tunnel header + 32B option length */
1756 #define QEDE_MAX_TUN_HDR_LEN 48
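/* .ndo_features_check callback - drop checksum/GSO offloads for tunnelled
 * skbs the HW can't handle: oversized tunnel headers, unknown UDP tunnel
 * ports, or IPIP tunnels.
 */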
1758 netdev_features_t qede_features_check(struct sk_buff *skb,
1759 struct net_device *dev,
1760 netdev_features_t features)
1762 if (skb->encapsulation) {
1765 switch (vlan_get_protocol(skb)) {
1766 case htons(ETH_P_IP):
1767 l4_proto = ip_hdr(skb)->protocol;
1769 case htons(ETH_P_IPV6):
1770 l4_proto = ipv6_hdr(skb)->nexthdr;
/* Disable offloads for geneve tunnels, as the HW can't parse
 * a geneve header whose option length is greater than 32 bytes,
 * and disable offloads for ports which are not offloaded.
 */
1780 if (l4_proto == IPPROTO_UDP) {
1781 struct qede_dev *edev = netdev_priv(dev);
1782 u16 hdrlen, vxln_port, gnv_port;
1784 hdrlen = QEDE_MAX_TUN_HDR_LEN;
1785 vxln_port = edev->vxlan_dst_port;
1786 gnv_port = edev->geneve_dst_port;
1788 if ((skb_inner_mac_header(skb) -
1789 skb_transport_header(skb)) > hdrlen ||
1790 (ntohs(udp_hdr(skb)->dest) != vxln_port &&
1791 ntohs(udp_hdr(skb)->dest) != gnv_port))
1792 return features & ~(NETIF_F_CSUM_MASK |
1794 } else if (l4_proto == IPPROTO_IPIP) {
/* IPIP tunnels are unknown to the device, or at least unsupported natively;
 * offloads for them can't be done trivially, so disable them for such skbs.
 */
1798 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);