// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}
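/* Allocate an skb for a received packet: either a frag-capable skb from the
 * napi cache (scatter-gather path) or a small linear skb (copybreak path).
 */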
static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = q_to_rx_stats(q);

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}
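/* Build a frag-based skb from the page buffers posted for this descriptor,
 * unmapping each page and attaching it to the skb without copying.
 */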
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}
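/* For packets at or below the copybreak threshold, copy the payload into a
 * freshly allocated linear skb and leave the mapped page in place so that
 * ionic_rx_fill() can recycle it.
 */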
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}
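/* Process one received packet: build the skb, fill in RSS hash, checksum
 * and VLAN metadata from the completion descriptor, and hand it to GRO.
 */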
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}
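/* Service one Rx completion: check that it is new (color match) and that it
 * refers to the oldest posted descriptor, then clean that descriptor.
 */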
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail->index == q->head->index)
		return false;

	desc_info = q->tail;
	if (desc_info->index != le16_to_cpu(comp->comp_index))
		return false;

	q->tail = desc_info->next;

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}
void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
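/* Rx buffers are whole pages, DMA-mapped once when allocated and unmapped
 * either when handed to the stack as skb frags or when freed here.
 */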
static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
					dma_addr_t *dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;
	struct page *page;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);
	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: Page alloc failed on %s!\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
		__free_page(page);
		net_err_ratelimited("%s: DMA single map failed on %s!\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return NULL;
	}

	return page;
}
static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
			       dma_addr_t dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page)) {
		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	__free_page(page);
}
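/* Refill the Rx ring: post enough page buffers per descriptor to hold an
 * MTU-sized frame, recycling pages still in place from the copybreak path,
 * and ring the doorbell once for the whole batch.
 */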
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = q->head;
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
		if (unlikely(!page_info->page)) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
			if (unlikely(!page_info->page)) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head->index);
}
static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *cur;
	struct ionic_rxq_desc *desc;
	unsigned int i;

	for (cur = q->tail; cur != q->head; cur = cur->next) {
		desc = cur->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < cur->npages; i++) {
			if (likely(cur->pages[i].page)) {
				ionic_rx_page_free(q, cur->pages[i].page,
						   cur->pages[i].dma_addr);
				cur->pages[i].page = NULL;
				cur->pages[i].dma_addr = 0;
			}
		}

		cur->cb_arg = NULL;
	}
}
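/* NAPI poll handler for a Tx-only completion queue: service up to the
 * budget's worth of completions, then re-arm the interrupt when done.
 */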
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(cq->bound_intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}
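/* NAPI poll handler for an Rx-only completion queue: service completions,
 * replenish the ring with fresh buffers, then re-arm when done.
 */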
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(cq->bound_intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}
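/* Combined NAPI poll handler used when Tx and Rx share an interrupt:
 * service Tx completions first (bounded by lif->tx_budget), then Rx, and
 * only unmask the interrupt once both rings are fully drained.
 */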
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 work_done = 0;
	u32 flags = 0;
	bool unmask;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi].qcq->cq;

	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);
	if (rx_work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);

	if (unmask && napi_complete_done(napi, rx_work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
		work_done = rx_work_done;
	} else {
		work_done = budget;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return work_done;
}
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}
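/* Unmap a completed Tx descriptor's buffers and, if an skb was attached,
 * free it, credit the BQL queue, and wake the subqueue if it was stopped.
 */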
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}
static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = q->tail;
		q->tail = desc_info->next;
		ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (desc_info->index != le16_to_cpu(comp->comp_index));

	return true;
}
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;

	/* walk the not completed tx entries, if any */
	while (q->head != q->tail) {
		desc_info = q->tail;
		q->tail = desc_info->next;
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}
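/* Seed the TCP checksum field with the pseudo-header sum (length zero) so
 * the hardware can finish the per-segment checksums during TSO.
 */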
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}
static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}
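/* Write one TSO descriptor; on the last descriptor of the skb, account the
 * bytes to BQL and post with a doorbell unless more packets are pending
 * (netdev_xmit_more()), otherwise just post quietly.
 */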
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	struct ionic_txq_desc *desc = q->head->desc;

	*elem = sg_desc->elems;
	return desc;
}
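/* Map and post a TSO skb: the head and frag data are carved into mss-sized
 * segments (the first also carrying the headers), one TSO descriptor plus
 * SG elements per segment, with SOT/EOT flags on the first and last.
 */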
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *abort = q->head;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *rewind = abort;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool has_vlan;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	/* unwind the descriptors posted so far and restore the queue head */
	while (rewind->desc != q->head->desc) {
		ionic_tx_clean(q, rewind, NULL, NULL);
		rewind = rewind->next;
	}
	q->head = abort;

	return -ENOMEM;
}
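/* Set up the initial descriptor for a packet whose checksum the hardware
 * will compute (CHECKSUM_PARTIAL), pointing at the skb's linear data.
 */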
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}
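/* Post a non-TSO packet: one main descriptor for the linear data plus one
 * SG element per page frag, then hand the whole thing to the hardware.
 */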
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}
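/* ndo_start_xmit handler: pick the Tx queue, make sure there is descriptor
 * space (linearizing or stopping the queue if not), then post the packet
 * via the TSO or non-TSO path.
 */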
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(!lif_to_txqcq(lif, queue_index)))
		queue_index = 0;
	q = lif_to_txq(lif, queue_index);

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}