// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq);

static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq)
{
	u64 incr = (u64)(cq->cq_idx) << 32;
	u64 status;

	status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

	if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
		     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
		dev_err(pfvf->dev, "CQ stopped due to error");
		return -EINVAL;
	}

	cq->cq_tail = status & 0xFFFFF;
	cq->cq_head = (status >> 20) & 0xFFFFF;
	if (cq->cq_tail < cq->cq_head)
		cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
				cq->cq_tail;
	else
		cq->pend_cqe = cq->cq_tail - cq->cq_head;

	return 0;
}

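/* Note on the status read above: a single atomic fetch-add on the CQ_OP
 * status register returns the CQ's tail and head pointers packed into one
 * 64-bit word (tail in bits [19:0], head in bits [39:20]), plus the
 * CQ_OP_STAT_* error flags. pend_cqe then accounts for ring wrap.
 * Worked example, assuming cqe_cnt = 1024: head = 1000 and tail = 8 gives
 * (1024 - 1000) + 8 = 32 pending CQEs.
 */
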
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
	struct nix_cqe_hdr_s *cqe_hdr;

	cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
	if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
		return NULL;

	cq->cq_head++;
	cq->cq_head &= (cq->cqe_cnt - 1);

	return cqe_hdr;
}

static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

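/* frag_num() above appears to compensate for endianness: the per-fragment
 * sizes are written as 16-bit lanes inside the 64-bit SG subdescriptor
 * header, so on big-endian kernels the lane order within each 64-bit word
 * has to be reversed before indexing.
 */
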
static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
					struct sk_buff *skb, int seg, int *len)
{
	const skb_frag_t *frag;
	struct page *page;
	int offset;

	/* First segment is always skb->data */
	if (!seg) {
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
		*len = skb_headlen(skb);
	} else {
		frag = &skb_shinfo(skb)->frags[seg - 1];
		page = skb_frag_page(frag);
		offset = skb_frag_off(frag);
		*len = skb_frag_size(frag);
	}
	return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
	int seg;

	for (seg = 0; seg < sg->num_segs; seg++) {
		otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
				    sg->size[seg], DMA_TO_DEVICE);
	}
	sg->num_segs = 0;
}

static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
				     struct otx2_snd_queue *sq,
				     struct nix_cqe_tx_s *cqe)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct sg_list *sg;
	struct page *page;
	u64 pa;

	sg = &sq->sg[snd_comp->sqe_id];

	pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
	otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
			    sg->size[0], DMA_TO_DEVICE);
	page = virt_to_page(phys_to_virt(pa));
	put_page(page);
}

static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
				 struct otx2_cq_queue *cq,
				 struct otx2_snd_queue *sq,
				 struct nix_cqe_tx_s *cqe,
				 int budget, int *tx_pkts, int *tx_bytes)
{
	struct nix_send_comp_s *snd_comp = &cqe->comp;
	struct skb_shared_hwtstamps ts;
	struct sk_buff *skb = NULL;
	u64 timestamp, tsns;
	struct sg_list *sg;
	int err;

	if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
		net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
				    pfvf->netdev->name, cq->cint_idx,
				    snd_comp->status);

	sg = &sq->sg[snd_comp->sqe_id];
	skb = (struct sk_buff *)sg->skb;
	if (unlikely(!skb))
		return;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
		if (timestamp != 1) {
			timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
			err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
			if (!err) {
				memset(&ts, 0, sizeof(ts));
				ts.hwtstamp = ns_to_ktime(tsns);
				skb_tstamp_tx(skb, &ts);
			}
		}
	}

	*tx_bytes += skb->len;
	(*tx_pkts)++;
	otx2_dma_unmap_skb_frags(pfvf, sg);
	napi_consume_skb(skb, budget);
	sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
			      struct sk_buff *skb, void *data)
{
	u64 timestamp, tsns;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return;

	/* The first 8 bytes are the timestamp */
	timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
	err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
	if (err)
		return;

	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse,
			      int qidx)
{
	struct page *page;
	int off = 0;
	void *va;

	va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

	if (likely(!skb_shinfo(skb)->nr_frags)) {
		/* Check if data starts at some nonzero offset
		 * from the start of the buffer.  For now the
		 * only possible offset is 8 bytes in the case
		 * where packet is prepended by a timestamp.
		 */
		if (parse->laptr) {
			otx2_set_rxtstamp(pfvf, skb, va);
			off = OTX2_HW_TIMESTAMP_LEN;
		}
	}

	page = virt_to_page(va);
	if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				va - page_address(page) + off,
				len - off, pfvf->rbsize);

		otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
				    pfvf->rbsize, DMA_FROM_DEVICE);
		return true;
	}

	/* If more than MAX_SKB_FRAGS fragments are received then
	 * give back those buffer pointers to hardware for reuse.
	 */
	pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);

	return false;
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
			    struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	struct otx2_rss_info *rss;
	u32 hash = 0;

	if (!(pfvf->netdev->features & NETIF_F_RXHASH))
		return;

	rss = &pfvf->hw.rss_info;
	if (rss->flowkey_cfg) {
		if (rss->flowkey_cfg &
		    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_L3;
		hash = cqe->hdr.flow_tag;
	}
	skb_set_hash(skb, hash, hash_type);
}

static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			pfvf->hw_ops->aura_freeptr(pfvf, qidx,
						   *seg_addr & ~0x07ULL);
		start += sizeof(*sg);
	}
}

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
				  struct nix_cqe_rx_s *cqe, int qidx)
{
	struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
	struct nix_rx_parse_s *parse = &cqe->parse;

	if (netif_msg_rx_err(pfvf))
		netdev_err(pfvf->netdev,
			   "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
			   qidx, parse->errlev, parse->errcode);

	if (parse->errlev == NPC_ERRLVL_RE) {
		switch (parse->errcode) {
		case ERRCODE_FCS:
		case ERRCODE_FCS_RCV:
			atomic_inc(&stats->rx_fcs_errs);
			break;
		case ERRCODE_UNDERSIZE:
			atomic_inc(&stats->rx_undersize_errs);
			break;
		case ERRCODE_OVERSIZE:
			atomic_inc(&stats->rx_oversize_errs);
			break;
		case ERRCODE_OL2_LEN_MISMATCH:
			atomic_inc(&stats->rx_len_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else if (parse->errlev == NPC_ERRLVL_NIX) {
		switch (parse->errcode) {
		case ERRCODE_OL3_LEN:
		case ERRCODE_OL4_LEN:
		case ERRCODE_IL3_LEN:
		case ERRCODE_IL4_LEN:
			atomic_inc(&stats->rx_len_errs);
			break;
		case ERRCODE_OL4_CSUM:
		case ERRCODE_IL4_CSUM:
			atomic_inc(&stats->rx_csum_errs);
			break;
		default:
			atomic_inc(&stats->rx_other_errs);
			break;
		}
	} else {
		atomic_inc(&stats->rx_other_errs);
		/* For now ignore all the NPC parser errors and
		 * pass the packets to stack.
		 */
		return false;
	}

	/* If RXALL is enabled pass on packets to stack. */
	if (pfvf->netdev->features & NETIF_F_RXALL)
		return false;

	/* Free buffer back to pool */
	if (cqe->sg.segs)
		otx2_free_rcv_seg(pfvf, cqe, qidx);

	return true;
}

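/* Summary of the error policy above: RE- and NIX-level errors are counted
 * per class and the packet is dropped (its buffers are returned to the
 * pool), unless NETIF_F_RXALL is enabled, in which case the packet is still
 * handed to the stack. Errors reported at other (NPC parser) levels are
 * only counted and the packet continues up the stack.
 */
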
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
				 struct napi_struct *napi,
				 struct otx2_cq_queue *cq,
				 struct nix_cqe_rx_s *cqe)
{
	struct nix_rx_parse_s *parse = &cqe->parse;
	struct nix_rx_sg_s *sg = &cqe->sg;
	struct sk_buff *skb = NULL;
	void *end, *start;
	u64 *seg_addr;
	u16 *seg_size;
	int seg;

	if (unlikely(parse->errlev || parse->errcode)) {
		if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
			return;
	}

	if (pfvf->xdp_prog)
		if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
			return;

	skb = napi_get_frags(napi);
	if (unlikely(!skb))
		return;

	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		seg_size = (void *)sg;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
			if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
					      seg_size[seg], parse, cq->cq_idx))
				cq->pool_ptrs++;
		}
		start += sizeof(*sg);
	}
	otx2_set_rxhash(pfvf, cqe, skb);

	skb_record_rx_queue(skb, cq->cq_idx);
	if (pfvf->netdev->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
				struct napi_struct *napi,
				struct otx2_cq_queue *cq, int budget)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
		if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
		    !cqe->sg.seg_addr) {
			if (!processed_cqe)
				return 0;
			break;
		}
		cq->cq_head++;
		cq->cq_head &= (cq->cqe_cnt - 1);

		otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		cqe->sg.seg_addr = 0x00;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	return processed_cqe;
}

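/* Writing 'processed_cqe' together with the CQ index to NIX_LF_CQ_OP_DOOR
 * above is what hands the consumed CQEs back to hardware; until that
 * doorbell write the hardware still treats those ring entries as
 * outstanding.
 */
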
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
	struct otx2_nic *pfvf = dev;
	dma_addr_t bufptr;

	while (cq->pool_ptrs) {
		if (otx2_alloc_buffer(pfvf, cq, &bufptr))
			break;
		otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
}

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
				struct otx2_cq_queue *cq, int budget)
{
	int tx_pkts = 0, tx_bytes = 0, qidx;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;

	if (cq->pend_cqe >= budget)
		goto process_cqe;

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return 0;

process_cqe:
	while (likely(processed_cqe < budget) && cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		if (unlikely(!cqe)) {
			if (!processed_cqe)
				return 0;
			break;
		}
		if (cq->cq_type == CQ_XDP) {
			qidx = cq->cq_idx - pfvf->hw.rx_queues;
			otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
						 cqe);
		} else {
			otx2_snd_pkt_handler(pfvf, cq,
					     &pfvf->qset.sq[cq->cint_idx],
					     cqe, budget, &tx_pkts, &tx_bytes);
		}
		cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
		processed_cqe++;
		cq->pend_cqe--;
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);

	if (likely(tx_pkts)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
		netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
		/* Check if queue was stopped earlier due to ring full */
		smp_mb();
		if (netif_tx_queue_stopped(txq) &&
		    netif_carrier_ok(pfvf->netdev))
			netif_tx_wake_queue(txq);
	}
	return 0;
}

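/* netdev_tx_completed_queue() above is the BQL completion half; it pairs
 * with the netdev_tx_sent_queue() calls made when SQEs are queued (see
 * otx2_sq_append_skb()/otx2_sq_append_tso() below), so byte-queue limits
 * and queue wake-up stay in sync with actual hardware completions.
 */
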
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
	struct dim_sample dim_sample;
	u64 rx_frames, rx_bytes;

	rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
		OTX2_GET_RX_STATS(RX_UCAST);
	rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
	net_dim(&cq_poll->dim, dim_sample);
}

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
	struct otx2_cq_queue *rx_cq = NULL;
	struct otx2_cq_poll *cq_poll;
	int workdone = 0, cq_idx, i;
	struct otx2_cq_queue *cq;
	struct otx2_qset *qset;
	struct otx2_nic *pfvf;

	cq_poll = container_of(napi, struct otx2_cq_poll, napi);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	qset = &pfvf->qset;

	for (i = 0; i < CQS_PER_CINT; i++) {
		cq_idx = cq_poll->cq_ids[i];
		if (unlikely(cq_idx == CINT_INVALID_CQ))
			continue;
		cq = &qset->cq[cq_idx];
		if (cq->cq_type == CQ_RX) {
			rx_cq = cq;
			workdone += otx2_rx_napi_handler(pfvf, napi,
							 cq, budget);
		} else {
			workdone += otx2_tx_napi_handler(pfvf, cq, budget);
		}
	}

	if (rx_cq && rx_cq->pool_ptrs)
		pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
	/* Clear the IRQ */
	otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

	if (workdone < budget && napi_complete_done(napi, workdone)) {
		/* If interface is going down, don't re-enable IRQ */
		if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
			return workdone;

		/* Check for adaptive interrupt coalesce */
		if (workdone != 0 &&
		    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
		     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
			/* Adjust irq coalescing using net_dim */
			otx2_adjust_adaptive_coalese(pfvf, cq_poll);
			/* Update irq coalescing */
			for (i = 0; i < pfvf->hw.cint_cnt; i++)
				otx2_config_irq_coalescing(pfvf, i);
		}

		/* Re-enable interrupts */
		otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
			     BIT_ULL(0));
	}
	return workdone;
}

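/* The SQE flush below uses an LMTST (large atomic store) sequence: the SQE
 * is copied into the per-SQ LMT region and otx2_lmt_flush() issues the
 * store to the SQ's I/O address. A zero status is taken to mean the LMTST
 * did not complete, so the copy plus flush is retried; hence the do/while
 * loop.
 */
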
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx)
{
	u64 status;

	/* Packet data stores should finish before SQE is flushed to HW */
	dma_wmb();

	do {
		memcpy(sq->lmt_addr, sq->sqe_base, size);
		status = otx2_lmt_flush(sq->io_addr);
	} while (status == 0);

	sq->head++;
	sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			    struct sk_buff *skb, int num_segs, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;

	for (seg = 0; seg < num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16byte boundary.
			 * So if sg->segs is either 2 or 3, offset += 16bytes.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
		sg->segs++;
		*iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}

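/* Layout sketch for the SG subdescriptor built above, derived from the
 * code rather than the hardware manual: each struct nix_sqe_sg_s is a
 * 64-bit header whose 16-bit lanes hold up to three segment sizes (indexed
 * via frag_num()), followed by one u64 IOVA per segment. With a single
 * segment the unit is 16 bytes (header + 1 IOVA); with two or three
 * segments it is 32 bytes (header + 3 IOVA slots), which keeps the next
 * subdescriptor 16-byte aligned.
 */
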
/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct sk_buff *skb, int *offset)
{
	struct nix_sqe_ext_s *ext;

	ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
	ext->subdc = NIX_SUBDC_EXT;
	if (skb_shinfo(skb)->gso_size) {
		ext->lso = 1;
		ext->lso_sb = skb_tcp_all_headers(skb);
		ext->lso_mps = skb_shinfo(skb)->gso_size;

		/* Only TSOv4 and TSOv6 GSO offloads are supported */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ext->lso_format = pfvf->hw.lso_tsov4_idx;

			/* HW adds payload size to 'ip_hdr->tot_len' while
			 * sending TSO segment, hence set payload length
			 * in IP header of the packet to just header length.
			 */
			ip_hdr(skb)->tot_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			ext->lso_format = pfvf->hw.lso_tsov6_idx;

			ipv6_hdr(skb)->payload_len =
				htons(ext->lso_sb - skb_network_offset(skb));
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			__be16 l3_proto = vlan_get_protocol(skb);
			struct udphdr *udph = udp_hdr(skb);
			u16 iplen;

			ext->lso_sb = skb_transport_offset(skb) +
				      sizeof(struct udphdr);

			/* HW adds payload size to length fields in IP and
			 * UDP headers during segmentation, hence adjust the
			 * lengths to just header sizes.
			 */
			iplen = htons(ext->lso_sb - skb_network_offset(skb));
			if (l3_proto == htons(ETH_P_IP)) {
				ip_hdr(skb)->tot_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv4_idx;
			} else {
				ipv6_hdr(skb)->payload_len = iplen;
				ext->lso_format = pfvf->hw.lso_udpv6_idx;
			}

			udph->len = htons(sizeof(struct udphdr));
		}
	} else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		ext->tstmp = 1;
	}

#define OTX2_VLAN_PTR_OFFSET	(ETH_HLEN - ETH_TLEN)
	if (skb_vlan_tag_present(skb)) {
		if (skb->vlan_proto == htons(ETH_P_8021Q)) {
			ext->vlan1_ins_ena = 1;
			ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
		} else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
			ext->vlan0_ins_ena = 1;
			ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
			ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
		}
	}

	*offset += sizeof(*ext);
}

static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
			     int alg, u64 iova)
{
	struct nix_sqe_mem_s *mem;

	mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
	mem->subdc = NIX_SUBDC_MEM;
	mem->alg = alg;
	mem->wmem = 1; /* wait for the memory operation */
	mem->addr = iova;

	*offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			     struct nix_sqe_hdr_s *sqe_hdr,
			     struct sk_buff *skb, u16 qidx)
{
	int proto = 0;

	/* Check if SQE was framed before, if yes then no need to
	 * set these constants again and again.
	 */
	if (!sqe_hdr->total) {
		/* Don't free Tx buffers to Aura */
		sqe_hdr->df = 1;
		sqe_hdr->aura = sq->aura_id;
		/* Post a CQE Tx after pkt transmission */
		sqe_hdr->pnc = 1;
		sqe_hdr->sq = qidx;
	}
	sqe_hdr->total = skb->len;
	/* Set SQE identifier which will be used later for freeing SKB */
	sqe_hdr->sqe_id = sq->head;

	/* Offload TCP/UDP checksum to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		sqe_hdr->ol3ptr = skb_network_offset(skb);
		sqe_hdr->ol4ptr = skb_transport_offset(skb);
		/* get vlan protocol Ethertype */
		if (eth_type_vlan(skb->protocol))
			skb->protocol = vlan_get_protocol(skb);

		if (skb->protocol == htons(ETH_P_IP)) {
			proto = ip_hdr(skb)->protocol;
			/* In case of TSO, HW needs this to be explicitly set.
			 * So set this always, instead of adding a check.
			 */
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			proto = ipv6_hdr(skb)->nexthdr;
			sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
		}

		if (proto == IPPROTO_TCP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
		else if (proto == IPPROTO_UDP)
			sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
	}
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
				struct otx2_snd_queue *sq,
				struct sk_buff *skb, int sqe, int hdr_len)
{
	int num_segs = skb_shinfo(skb)->nr_frags + 1;
	struct sg_list *sg = &sq->sg[sqe];
	u64 dma_addr;
	int seg, len;

	sg->num_segs = 0;

	/* Get payload length at skb->data */
	len = skb_headlen(skb) - hdr_len;

	for (seg = 0; seg < num_segs; seg++) {
		/* Skip skb->data, if there is no payload */
		if (!seg && !len)
			continue;
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		if (dma_mapping_error(pfvf->dev, dma_addr))
			goto unmap;

		/* Save DMA mapping info for later unmapping */
		sg->dma_addr[sg->num_segs] = dma_addr;
		sg->size[sg->num_segs] = len;
		sg->num_segs++;
	}
	return 0;
unmap:
	otx2_dma_unmap_skb_frags(pfvf, sg);
	return -EINVAL;
}

static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
				  struct sk_buff *skb, int seg,
				  u64 seg_addr, int hdr_len, int sqe)
{
	struct sg_list *sg = &sq->sg[sqe];
	const skb_frag_t *frag;
	int offset;

	if (seg < 0)
		return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

	frag = &skb_shinfo(skb)->frags[seg];
	offset = seg_addr - (u64)skb_frag_address(frag);
	if (skb_headlen(skb) - hdr_len)
		seg++;
	return sg->dma_addr[seg] + offset;
}

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
				struct sg_list *list, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u16 *sg_lens = NULL;
	u64 *iova = NULL;
	int seg;

	/* Add SG descriptors with buffer addresses */
	for (seg = 0; seg < list->num_segs; seg++) {
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16byte boundary.
			 * So if sg->segs is either 2 or 3, offset += 16bytes.
			 */
			if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);
		}
		sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
		*iova++ = list->dma_addr[seg];
		sg->segs++;
	}
}

static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			       struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
	int hdr_len, tcp_data, seg_len, pkt_len, offset;
	struct nix_sqe_hdr_s *sqe_hdr;
	int first_sqe = sq->head;
	struct sg_list list;
	struct tso_t tso;

	hdr_len = tso_start(skb, &tso);

	/* Map SKB's fragments to DMA.
	 * It's done here to avoid mapping for every TSO segment's packet.
	 */
	if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
		dev_kfree_skb_any(skb);
		return;
	}

	netdev_tx_sent_queue(txq, skb->len);

	tcp_data = skb->len - hdr_len;
	while (tcp_data > 0) {
		char *hdr;

		seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
		tcp_data -= seg_len;

		/* Set SQE's SEND_HDR */
		memset(sq->sqe_base, 0, sq->sqe_size);
		sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
		otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
		offset = sizeof(*sqe_hdr);

		/* Add TSO segment's pkt header */
		hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
		tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
		list.dma_addr[0] =
			sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
		list.size[0] = hdr_len;
		list.num_segs = 1;

		/* Add TSO segment's payload data fragments */
		pkt_len = hdr_len;
		while (seg_len > 0) {
			int size;

			size = min_t(int, tso.size, seg_len);

			list.size[list.num_segs] = size;
			list.dma_addr[list.num_segs] =
				otx2_tso_frag_dma_addr(sq, skb,
						       tso.next_frag_idx - 1,
						       (u64)tso.data, hdr_len,
						       first_sqe);
			list.num_segs++;
			pkt_len += size;
			seg_len -= size;
			tso_build_data(skb, &tso, size);
		}
		sqe_hdr->total = pkt_len;
		otx2_sqe_tso_add_sg(sq, &list, &offset);

		/* DMA mappings and skb need to be freed only after the last
		 * TSO segment is transmitted out. So set 'PNC' only for the
		 * last segment. Also point the last segment's sqe_id to the
		 * first segment's SQE index, where the skb address and DMA
		 * mappings are saved.
		 */
		if (!tcp_data) {
			sqe_hdr->pnc = 1;
			sqe_hdr->sqe_id = first_sqe;
			sq->sg[first_sqe].skb = (u64)skb;
		} else {
			sqe_hdr->pnc = 0;
		}

		sqe_hdr->sizem1 = (offset / 16) - 1;

		/* Flush SQE to HW */
		pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
	}
}

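/* Software TSO path above, in brief: the whole skb is DMA-mapped once,
 * then for every MSS-sized chunk a fresh SQE is built with a rebuilt
 * header (tso_build_hdr()) placed in the per-SQ tso_hdrs area plus SG
 * entries pointing into the original payload. Only the final segment sets
 * PNC and records the skb against the first SQE, so completion of the last
 * segment frees the mappings and the skb exactly once.
 */
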
static bool is_hw_tso_supported(struct otx2_nic *pfvf,
				struct sk_buff *skb)
{
	int payload_len, last_seg_size;

	if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
		return true;

	/* On 96xx A0, HW TSO not supported */
	if (!is_96xx_B0(pfvf->pdev))
		return false;

	/* HW has an issue where, if the payload of the last LSO segment is
	 * shorter than 16 bytes, some header fields may not be modified
	 * correctly, hence don't offload such TSO segments.
	 */

	payload_len = skb->len - skb_tcp_all_headers(skb);
	last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
	if (last_seg_size && last_seg_size < 16)
		return false;

	return true;
}

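/* Worked example for the erratum check above, assuming gso_size = 1400 and
 * a TCP payload of 2805 bytes: 2805 % 1400 = 5, so the last segment would
 * carry only 5 bytes of payload (< 16) and the skb is segmented in
 * software instead of being offloaded.
 */
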
static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
	if (!skb_shinfo(skb)->gso_size)
		return 1;

	/* HW TSO */
	if (is_hw_tso_supported(pfvf, skb))
		return 1;

	/* SW TSO */
	return skb_shinfo(skb)->gso_segs;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
			      struct otx2_snd_queue *sq, int *offset)
{
	u64 iova;

	if (!skb_shinfo(skb)->gso_size &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
	} else {
		skb_tx_timestamp(skb);
	}
}

bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int offset, num_segs, free_sqe;
	struct nix_sqe_hdr_s *sqe_hdr;

	/* Check if there is room for new SQE.
	 * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
	 * will give free SQE count.
	 */
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

	if (free_sqe < sq->sqe_thresh ||
	    free_sqe < otx2_get_sqe_count(pfvf, skb))
		return false;

	num_segs = skb_shinfo(skb)->nr_frags + 1;

	/* If SKB doesn't fit in a single SQE, linearize it.
	 * TODO: Consider adding JUMP descriptor instead.
	 */
	if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return true;
		}
		num_segs = skb_shinfo(skb)->nr_frags + 1;
	}

	if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
		/* Insert vlan tag before giving pkt to tso */
		if (skb_vlan_tag_present(skb))
			skb = __vlan_hwaccel_push_inside(skb);
		otx2_sq_append_tso(pfvf, sq, skb, qidx);
		return true;
	}

	/* Set SQE's SEND_HDR.
	 * Do not clear the first 64bit as it contains constant info.
	 */
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
	otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
	offset = sizeof(*sqe_hdr);

	/* Add extended header if needed */
	otx2_sqe_add_ext(pfvf, sq, skb, &offset);

	/* Add SG subdesc with data frags */
	if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
		otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
		return false;
	}

	otx2_set_txtstamp(pfvf, skb, sq, &offset);

	sqe_hdr->sizem1 = (offset / 16) - 1;

	netdev_tx_sent_queue(txq, skb->len);

	/* Flush SQE to HW */
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

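/* Illustrative caller sketch (hypothetical, not part of this file): the
 * ndo_start_xmit handler is expected to stop the queue when this function
 * reports no SQE room and retry later, roughly:
 *
 *	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
 *		netif_tx_stop_queue(txq);
 *		return NETDEV_TX_BUSY;
 *	}
 *	return NETDEV_TX_OK;
 *
 * where txq, sq and qidx are resolved by the caller.
 */
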
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct nix_cqe_rx_s *cqe;
	int processed_cqe = 0;
	u64 iova, pa;

	if (pfvf->xdp_prog)
		xdp_rxq_info_unreg(&cq->xdp_rxq);

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		if (cqe->sg.segs > 1) {
			otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
			continue;
		}
		iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
	struct sk_buff *skb = NULL;
	struct otx2_snd_queue *sq;
	struct nix_cqe_tx_s *cqe;
	int processed_cqe = 0;
	struct sg_list *sg;

	sq = &pfvf->qset.sq[cq->cint_idx];

	if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
		return;

	while (cq->pend_cqe) {
		cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
		processed_cqe++;
		cq->pend_cqe--;

		if (!cqe)
			continue;
		sg = &sq->sg[cqe->comp.sqe_id];
		skb = (struct sk_buff *)sg->skb;
		if (skb) {
			otx2_dma_unmap_skb_frags(pfvf, sg);
			dev_kfree_skb_any(skb);
			sg->skb = (u64)NULL;
		}
	}

	/* Free CQEs to HW */
	otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
		     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
	else
		msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

	if (!msg) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
				int len, int *offset)
{
	struct nix_sqe_sg_s *sg = NULL;
	u64 *iova = NULL;

	sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
	sg->ld_type = NIX_SEND_LDTYPE_LDD;
	sg->subdc = NIX_SUBDC_SG;
	sg->segs = 1;
	sg->seg1_size = len;
	iova = (void *)sg + sizeof(*sg);
	*iova = dma_addr;
	*offset += sizeof(*sg) + sizeof(u64);

	sq->sg[sq->head].dma_addr[0] = dma_addr;
	sq->sg[sq->head].size[0] = len;
	sq->sg[sq->head].num_segs = 1;
}

bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
{
	struct nix_sqe_hdr_s *sqe_hdr;
	struct otx2_snd_queue *sq;
	int offset, free_sqe;

	sq = &pfvf->qset.sq[qidx];
	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
	if (free_sqe < sq->sqe_thresh)
		return false;

	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);

	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);

	if (!sqe_hdr->total) {
		sqe_hdr->aura = sq->aura_id;
		sqe_hdr->df = 1;
		sqe_hdr->sq = qidx;
		sqe_hdr->pnc = 1;
	}
	sqe_hdr->total = len;
	sqe_hdr->sqe_id = sq->head;

	offset = sizeof(*sqe_hdr);

	otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
	sqe_hdr->sizem1 = (offset / 16) - 1;
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

	return true;
}

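/* The XDP receive hook below maps each program verdict onto this driver's
 * buffer model: XDP_PASS (and unknown/aborted verdicts) fall through to the
 * normal skb receive path, XDP_TX recycles the buffer into a dedicated XDP
 * send queue (indexed after the regular TX queues), XDP_REDIRECT hands the
 * frame to xdp_do_redirect(), and XDP_DROP unmaps the buffer and releases
 * the page.
 */
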
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
				     struct bpf_prog *prog,
				     struct nix_cqe_rx_s *cqe,
				     struct otx2_cq_queue *cq)
{
	unsigned char *hard_start, *data;
	int qidx = cq->cq_idx;
	struct xdp_buff xdp;
	struct page *page;
	u64 iova, pa;
	u32 act;
	int err;

	iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
	page = virt_to_page(phys_to_virt(pa));

	xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);

	data = (unsigned char *)phys_to_virt(pa);
	hard_start = page_address(page);
	xdp_prepare_buff(&xdp, hard_start, data - hard_start,
			 cqe->sg.seg_size, false);

	act = bpf_prog_run_xdp(prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		qidx += pfvf->hw.tx_queues;
		cq->pool_ptrs++;
		return otx2_xdp_sq_append_pkt(pfvf, iova,
					      cqe->sg.seg_size, qidx);
	case XDP_REDIRECT:
		cq->pool_ptrs++;
		err = xdp_do_redirect(pfvf->netdev, &xdp, prog);

		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		if (!err)
			return true;
		put_page(page);
		break;
	default:
		bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
		break;
	case XDP_ABORTED:
		trace_xdp_exception(pfvf->netdev, prog, act);
		break;
	case XDP_DROP:
		otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
				    DMA_FROM_DEVICE);
		put_page(page);
		cq->pool_ptrs++;
		return true;
	}
	return false;
}