// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT	0x13F
/* PTPv2 header's Original Timestamp starts at byte offset 34 and
 * contains a 6-byte seconds field and a 4-byte nanoseconds field.
 */
#define PTP_SYNC_SEC_OFFSET	34

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq);

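/* Read CQ state via the NIX LF CQ_OP_STATUS register: an atomic
 * fetch-and-add with the CQ index in the upper 32 bits returns the
 * error flags plus the 20-bit tail and head pointers, from which the
 * number of pending CQEs is derived, accounting for ring wrap-around.
 */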
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq)
{
	u64 incr = (u64)(cq->cq_idx) << 32;
	u64 status;

	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
		dev_err(pfvf->dev, "CQ stopped due to error\n");
		return -EINVAL;
	}

	cq->cq_tail = status & 0xFFFFF;
	cq->cq_head = (status >> 20) & 0xFFFFF;
	if (cq->cq_tail < cq->cq_head)
		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
				cq->cq_tail;
	else
		cq->pend_cqe = cq->cq_tail - cq->cq_head;

	return 0;
}

static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

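/* The four 16-bit segment sizes of an SG subdescriptor share one 64-bit
 * word, laid out in little-endian lane order. On big-endian hosts the
 * index must be mirrored within its group of four so that each size
 * lands in the lane the hardware expects.
 */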
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
				     struct otx2_snd_queue *sq,
				     struct nix_cqe_tx_s *cqe)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sg_list *sg;
	struct page *page;
	u64 pa;

	sg = &sq->sg[snd_comp->sqe_id];

	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
			    sg->size[0], DMA_TO_DEVICE);
	page = virt_to_page(phys_to_virt(pa));
	put_page(page);
}

static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct skb_shared_hwtstamps ts;
	struct sk_buff *skb = NULL;
	u64 timestamp, tsns;
	struct sg_list *sg;
	int err;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
		if (timestamp != 1) {
			timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
			if (!err) {
				memset(&ts, 0, sizeof(ts));
				ts.hwtstamp = ns_to_ktime(tsns);
				skb_tstamp_tx(skb, &ts);
			}
		}
	}

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{
	u64 timestamp, tsns;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return;

	timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
	/* The first 8 bytes are the timestamp */
	err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
	if (err)
		return;

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse,
			      int qidx)
{
	struct page *page;
	int off = 0;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

	if (likely(!skb_shinfo(skb)->nr_frags)) {
		/* Check if data starts at some nonzero offset
		 * from the start of the buffer.  For now the
		 * only possible offset is 8 bytes in the case
		 * where packet is prepended by a timestamp.
		 */
		if (parse->laptr) {
			otx2_set_rxtstamp(pfvf, skb, va);
			off = OTX2_HW_TIMESTAMP_LEN;
		}
	}

	page = virt_to_page(va);
	if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				va - page_address(page) + off,
				len - off, pfvf->rbsize);
		return true;
	}

	/* If more than MAX_SKB_FRAGS fragments are received then
	 * give back those buffer pointers to hardware for reuse.
	 */
	pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);

	return false;
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			pfvf->hw_ops->aura_freeptr(pfvf, qidx,
						   *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to stack.
		 */
		return false;
	}

	/* If RXALL is enabled pass on packets to stack. */
	if (pfvf->netdev->features & NETIF_F_RXALL)
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);

	return true;
}

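/* A single RX CQE may carry several NIX_RX_SG_S subdescriptors, each
 * referencing up to three receive buffers. Walk all of them, attach
 * every segment to the skb as a page fragment, and count how many
 * buffer pointers must be replenished to the aura afterwards.
 */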
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct nix_rx_sg_s *sg = &cqe->sg;
	struct sk_buff *skb = NULL;
	void *end, *start;
	u64 *seg_addr;
	u16 *seg_size;
	int seg;

	if (unlikely(parse->errlev || parse->errcode)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	if (pfvf->xdp_prog)
		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
			return;

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		seg_size = (void *)sg;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
			if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
					      seg_size[seg], parse, cq->cq_idx))
				cq->pool_ptrs++;
		}
		start += sizeof(*sg);
	}
	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_mark_for_recycle(skb);

	napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	return processed_cqe;
}

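/* Refill buffer pointers consumed by received packets back into the
 * aura (buffer pool). An allocation failure leaves the remaining count
 * in cq->pool_ptrs so a later NAPI run can retry.
 */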
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
	struct otx2_nic *pfvf = dev;
	dma_addr_t bufptr;

	while (cq->pool_ptrs) {
		if (otx2_alloc_buffer(pfvf, cq, &bufptr))
			break;
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
}

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0, qidx;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	qidx = cq->cq_idx - pfvf->hw.rx_queues;
	sq = &pfvf->qset.sq[qidx];

	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}

		qidx = cq->cq_idx - pfvf->hw.rx_queues;

		if (cq->cq_type == CQ_XDP)
			otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
		else
			otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
					     cqe, budget, &tx_pkts, &tx_bytes);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
		cq->pend_cqe--;

		sq->cons_head++;
		sq->cons_head &= (sq->sqe_cnt - 1);
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		qidx = cq->cq_idx - pfvf->hw.rx_queues;

		if (qidx >= pfvf->hw.tx_queues)
			qidx -= pfvf->hw.xdp_queues;
		txq = netdev_get_tx_queue(pfvf->netdev, qidx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if queue was stopped earlier due to ring full */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

static void otx2_adjust_adaptive_coalesce(struct otx2_nic *pfvf,
					  struct otx2_cq_poll *cq_poll)
{
	struct dim_sample dim_sample;
	u64 rx_frames, rx_bytes;

	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
		OTX2_GET_RX_STATS(RX_UCAST);
	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
	net_dim(&cq_poll->dim, dim_sample);
}

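/* NAPI poll handler: each completion interrupt (CINT) services up to
 * CQS_PER_CINT completion queues, mixing RX and TX CQs. The IRQ is
 * re-enabled only when the budget was not exhausted and the interface
 * is not being brought down.
 */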
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_queue *rx_cq = NULL;
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = 0; i < CQS_PER_CINT; i++) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			rx_cq = cq;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	if (rx_cq && rx_cq->pool_ptrs)
		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If interface is going down, don't re-enable IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Check for adaptive interrupt coalescing */
		if (workdone != 0 &&
		    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
		     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
			/* Adjust irq coalescing using net_dim */
			otx2_adjust_adaptive_coalesce(pfvf, cq_poll);
			/* Update irq coalescing */
			for (i = 0; i < pfvf->hw.cint_cnt; i++)
				otx2_config_irq_coalescing(pfvf, i);
		}

		/* Re-enable interrupts */
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}

void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

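/* Each NIX_SQE_SG_S subdescriptor covers up to three segments: one
 * 64-bit word holding three 16-bit segment sizes, followed by one
 * 64-bit IOVA per segment. The next subdescriptor must start on a
 * 16-byte boundary, hence the offset rounding in otx2_sqe_add_sg().
 */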
#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16-byte boundary.
			 * So if sg->segs is either 2 or 3, offset += 16 bytes.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_tcp_all_headers(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;
			ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			__be16 l3_proto = vlan_get_protocol(skb);
			struct udphdr *udph = udp_hdr(skb);
			u16 iplen;

			ext->lso_sb = skb_transport_offset(skb) +
				      sizeof(struct udphdr);

			/* HW adds payload size to length fields in IP and
			 * UDP headers during segmentation, hence adjust the
			 * lengths to just header sizes.
			 */
			iplen = htons(ext->lso_sb - skb_network_offset(skb));
			if (l3_proto == htons(ETH_P_IP)) {
				ip_hdr(skb)->tot_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv4_idx;
			} else {
				ipv6_hdr(skb)->payload_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv6_idx;
			}

			udph->len = htons(sizeof(struct udphdr));
		}
	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ext->tstmp = 1;
	}

#define OTX2_VLAN_PTR_OFFSET	(ETH_HLEN - ETH_TLEN)
	if (skb_vlan_tag_present(skb)) {
		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
			ext->vlan1_ins_ena = 1;
			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
			ext->vlan0_ins_ena = 1;
			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
		}
	}

	*offset += sizeof(*ext);
}

static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova, int ptp_offset,
			     u64 base_ns, bool udp_csum_crt)
{
	struct nix_sqe_mem_s *mem;

	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
	mem->subdc = NIX_SUBDC_MEM;
	mem->alg = alg;
	mem->wmem = 1; /* wait for the memory operation */
	mem->addr = iova;

	if (ptp_offset) {
		mem->start_offset = ptp_offset;
		mem->udp_csum_crt = !!udp_csum_crt;
		mem->base_ns = base_ns;
		mem->step_type = 1;
	}

	*offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* Check if SQE was framed before, if yes then no need to
	 * set these constants again and again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
			       qidx + pfvf->hw.xdp_queues : qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set SQE identifier which will be used later for freeing SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* get vlan protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set.
			 * So set this always, instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16-byte boundary.
			 * So if sg->segs is either 2 or 3, offset += 16 bytes.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

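/* Software TSO fallback: segment the skb on the CPU. The payload is
 * DMA-mapped once up front; each segment then gets its own SQE with a
 * freshly built header from the tso_* helpers plus SG entries pointing
 * into the already-mapped payload.
 */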
static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len, tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;

			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and skb need to be freed only after the last
		 * TSO segment is transmitted. So set 'PNC' only for the
		 * last segment. Also point last segment's sqe_id to first
		 * segment's SQE index where skb address and DMA mappings
		 * are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
	}
}

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
		return true;

	/* On 96xx A0, HW TSO not supported */
	if (!is_96xx_B0(pfvf->pdev))
		return false;

	/* HW has an issue due to which when the payload of the last LSO
	 * segment is shorter than 16 bytes, some header fields may not
	 * be correctly modified, hence don't offload such TSO segments.
	 */
	payload_len = skb->len - skb_tcp_all_headers(skb);
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

static bool otx2_validate_network_transport(struct sk_buff *skb)
{
	if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
	    (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
		struct udphdr *udph = udp_hdr(skb);

		if (udph->source == htons(PTP_PORT) &&
		    udph->dest == htons(PTP_PORT))
			return true;
	}

	return false;
}

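/* Determine whether the skb is a PTP SYNC message (over L2 or UDP) and
 * return the byte offset of the PTP header so the one-step timestamp
 * can be patched in. A messageId of 0 in the low nibble denotes SYNC.
 */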
static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	u16 nix_offload_hlen = 0, inner_vhlen = 0;
	bool udp_hdr_present = false, is_sync;
	u8 *data = skb->data, *msgtype;
	__be16 proto = eth->h_proto;
	int network_depth = 0;

	/* NIX is programmed to offload the outer VLAN header insertion.
	 * With a single VLAN, the protocol field holds the network header
	 * ethertype (ETH_P_IP/IPV6); with stacked VLANs it holds the inner
	 * VLAN ethertype (0x8100).
	 */
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
	    skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
		if (skb->vlan_proto == htons(ETH_P_8021AD)) {
			/* Get vlan protocol */
			proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
			/* SKB APIs like skb_transport_offset() do not include
			 * the offloaded VLAN header length, so add it
			 * explicitly.
			 */
			nix_offload_hlen = VLAN_HLEN;
			inner_vhlen = VLAN_HLEN;
		} else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
			nix_offload_hlen = VLAN_HLEN;
		}
	} else if (eth_type_vlan(eth->h_proto)) {
		proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
	}

	switch (ntohs(proto)) {
	case ETH_P_1588:
		if (network_depth)
			*offset = network_depth;
		else
			*offset = ETH_HLEN + nix_offload_hlen +
				  inner_vhlen;
		break;
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (!otx2_validate_network_transport(skb))
			return false;

		*offset = nix_offload_hlen + skb_transport_offset(skb) +
			  sizeof(struct udphdr);
		udp_hdr_present = true;
		break;
	default:
		return false;
	}

	msgtype = data + *offset;
	/* Check whether the PTP messageId is SYNC */
	is_sync = !(*msgtype & 0xf);
	if (is_sync)
		*udp_csum_crt = udp_hdr_present;
	else
		*offset = 0;

	return is_sync;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
			      struct otx2_snd_queue *sq, int *offset)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct ptpv2_tstamp *origin_tstamp;
	bool udp_csum_crt = false;
	unsigned int udphoff;
	struct timespec64 ts;
	int ptp_offset = 0;
	__wsum skb_csum;
	u64 iova;

	if (unlikely(!skb_shinfo(skb)->gso_size &&
		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
			     otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
			origin_tstamp = (struct ptpv2_tstamp *)
					((u8 *)skb->data + ptp_offset +
					 PTP_SYNC_SEC_OFFSET);
			ts = ns_to_timespec64(pfvf->ptp->tstamp);
			origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
			origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
			origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
			/* Point to correction field in PTP packet */
			ptp_offset += 8;

			/* When the user disables hw checksum, the stack
			 * calculates the csum, but it does not cover the ptp
			 * timestamp which is added later. Recalculate the
			 * checksum manually considering the timestamp.
			 */
			if (udp_csum_crt) {
				struct udphdr *uh = udp_hdr(skb);

				if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
					udphoff = skb_transport_offset(skb);
					uh->check = 0;
					skb_csum = skb_checksum(skb, udphoff,
								skb->len - udphoff,
								0);
					if (ntohs(eth->h_proto) == ETH_P_IPV6)
						uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
									    &ipv6_hdr(skb)->daddr,
									    skb->len - udphoff,
									    ipv6_hdr(skb)->nexthdr,
									    skb_csum);
					else
						uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
									      ip_hdr(skb)->daddr,
									      skb->len - udphoff,
									      IPPROTO_UDP,
									      skb_csum);
				}
			}
		}
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
				 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
	} else {
		skb_tx_timestamp(skb);
	}
}

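/* Append an skb to the send queue. Free descriptor space is computed
 * from the producer/consumer ring indices:
 *   free_desc = (cons_head - head - 1 + sqe_cnt) & (sqe_cnt - 1)
 * Returns false when the ring is too full, so the caller can stop the
 * netdev queue and retry later.
 */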
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int offset, num_segs, free_desc;
	struct nix_sqe_hdr_s *sqe_hdr;

	/* Check if there is enough room between producer
	 * and consumer index.
	 */
	free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
	if (free_desc < sq->sqe_thresh)
		return false;

	if (free_desc < otx2_get_sqe_count(pfvf, skb))
		return false;

	num_segs = skb_shinfo(skb)->nr_frags + 1;

	/* If SKB doesn't fit in a single SQE, linearize it.
	 * TODO: Consider adding JUMP descriptor instead.
	 */
	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return true;
		}
		num_segs = skb_shinfo(skb)->nr_frags + 1;
	}

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		/* Insert vlan tag before giving pkt to tso */
		if (skb_vlan_tag_present(skb))
			skb = __vlan_hwaccel_push_inside(skb);
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

	/* Set SQE's SEND_HDR.
	 * Do not clear the first 64bit as it contains constant info.
	 */
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
	offset = sizeof(*sqe_hdr);

	/* Add extended header if needed */
	otx2_sqe_add_ext(pfvf, sq, skb, &offset);

	/* Add SG subdesc with data frags */
	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
		return false;
	}

	otx2_set_txtstamp(pfvf, skb, sq, &offset);

	sqe_hdr->sizem1 = (offset / 16) - 1;

	netdev_tx_sent_queue(txq, skb->len);

	/* Flush SQE to HW */
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx)
{
	struct nix_cqe_rx_s *cqe;
	struct otx2_pool *pool;
	int processed_cqe = 0;
	u16 pool_id;
	u64 iova;

	if (pfvf->xdp_prog)
		xdp_rxq_info_unreg(&cq->xdp_rxq);

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
	pool = &pfvf->qset.pool[pool_id];

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		if (cqe->sg.segs > 1) {
			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
			continue;
		}
		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;

		otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct sk_buff *skb = NULL;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;
	struct sg_list *sg;
	int qidx;

	qidx = cq->cq_idx - pfvf->hw.rx_queues;
	sq = &pfvf->qset.sq[qidx];

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		sg = &sq->sg[cqe->comp.sqe_id];
		skb = (struct sk_buff *)sg->skb;
		if (skb) {
			otx2_dma_unmap_skb_frags(pfvf, sg);
			dev_kfree_skb_any(skb);
			sg->skb = (u64)NULL;
		}
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
				int len, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 *iova = NULL;

	sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
	sg->ld_type = NIX_SEND_LDTYPE_LDD;
	sg->subdc = NIX_SUBDC_SG;
	sg->segs = 1;
	sg->seg1_size = len;
	iova = (void *)sg + sizeof(*sg);
	*iova = dma_addr;
	*offset += sizeof(*sg) + sizeof(u64);

	sq->sg[sq->head].dma_addr[0] = dma_addr;
	sq->sg[sq->head].size[0] = len;
	sq->sg[sq->head].num_segs = 1;
}

bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
{
	struct nix_sqe_hdr_s *sqe_hdr;
	struct otx2_snd_queue *sq;
	int offset, free_sqe;

	sq = &pfvf->qset.sq[qidx];
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
	if (free_sqe < sq->sqe_thresh)
		return false;

	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);

	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);

	if (!sqe_hdr->total) {
		sqe_hdr->aura = sq->aura_id;
		sqe_hdr->df = 1;
		sqe_hdr->sq = qidx;
		sqe_hdr->pnc = 1;
	}
	sqe_hdr->total = len;
	sqe_hdr->sqe_id = sq->head;

	offset = sizeof(*sqe_hdr);

	otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
	sqe_hdr->sizem1 = (offset / 16) - 1;
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}

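/* Run the attached XDP program on a received buffer. XDP_TX bounces
 * the frame onto this interface's XDP send queues, XDP_REDIRECT hands
 * it to another device, and XDP_DROP recycles the buffer back to the
 * pool; only XDP_PASS lets the packet continue to the network stack.
 */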
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq)
{
	unsigned char *hard_start, *data;
	int qidx = cq->cq_idx;
	struct xdp_buff xdp;
	struct page *page;
	u64 iova, pa;
	u32 act;
	int err;

	iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
	page = virt_to_page(phys_to_virt(pa));

	xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);

	data = (unsigned char *)phys_to_virt(pa);
	hard_start = page_address(page);
	xdp_prepare_buff(&xdp, hard_start, data - hard_start,
			 cqe->sg.seg_size, false);

	act = bpf_prog_run_xdp(prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		qidx += pfvf->hw.tx_queues;
		cq->pool_ptrs++;
		return otx2_xdp_sq_append_pkt(pfvf, iova,
					      cqe->sg.seg_size, qidx);
	case XDP_REDIRECT:
		cq->pool_ptrs++;
		err = xdp_do_redirect(pfvf->netdev, &xdp, prog);

		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		if (!err)
			return true;
		put_page(page);
		break;
	default:
		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
		break;
	case XDP_ABORTED:
		trace_xdp_exception(pfvf->netdev, prog, act);
		break;
	case XDP_DROP:
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		put_page(page);
		cq->pool_ptrs++;
		return true;
	}
	return false;
}