 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>

#include "nicvf_queues.h"
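/* Receive buffers are carved out of compound pages: on systems with 4KB
 * base pages an order-PAGE_ALLOC_COSTLY_ORDER allocation (typically 32KB)
 * lets several receive fragments share one page, while larger base pages
 * are already big enough and are used at order 0.
 */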
#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
        /* Translation is installed only when IOMMU is present */
        if (nic->iommu_domain)
                return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
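        /* Without an IOMMU domain the DMA address handed to the hardware is
         * already the physical address, so no translation is needed in that
         * case.
         */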
static void nicvf_get_page(struct nicvf *nic)
        if (!nic->rb_pageref || !nic->rb_page)
        page_ref_add(nic->rb_page, nic->rb_pageref);

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
                          u64 reg, int bit_pos, int bits, int val)
        bit_mask = (1ULL << bits) - 1;
        bit_mask = (bit_mask << bit_pos);

        reg_val = nicvf_queue_reg_read(nic, reg, qidx);
        if (((reg_val & bit_mask) >> bit_pos) == val)
        usleep_range(1000, 2000);

        netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
                                  int q_len, int desc_size, int align_bytes)
        dmem->size = (desc_size * q_len) + align_bytes;
        /* Save the unaligned address, it is needed while freeing */
        dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
                                                 &dmem->dma, GFP_KERNEL);
        if (!dmem->unalign_base)

        /* Align the memory address to an 'align_bytes' boundary */
        dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
        dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
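        /* Example of the math above (assuming NICVF_ALIGNED_ADDR() rounds up
         * to the requested alignment): with align_bytes = 512 and a DMA
         * address of 0x10040, phys_base becomes 0x10200, and 'base' is
         * advanced by the same (phys_base - dma) delta so the CPU and device
         * views reference the same descriptor memory.
         */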
/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
        dma_free_coherent(&nic->pdev->dev, dmem->size,
                          dmem->unalign_base, dmem->dma);
        dmem->unalign_base = NULL;
/* Allocate a buffer for packet reception.
 * HW returns the memory address where the packet is DMA'ed, not a pointer
 * into the RBDR ring, so save the buffer address at the start of the
 * fragment and align the start address to a cache-line boundary.
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
                                         u32 buf_len, u64 **rbuf)
        int order = NICVF_PAGE_ORDER;

        /* Check if the request can be accommodated in the previously
         * allocated page.
         */
        ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {

        /* Allocate a new page */
        nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
        this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);

        nic->rb_page_offset = 0;

        /* HW will ensure data coherency, CPU sync not required */
        *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
                                                nic->rb_page_offset, buf_len,
                                                DMA_ATTR_SKIP_CPU_SYNC));
        if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
                if (!nic->rb_page_offset)
                        __free_pages(nic->rb_page, order);

        nic->rb_page_offset += buf_len;
/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
        data = phys_to_virt(rb_ptr);

        /* Now build an skb to give to stack */
        skb = build_skb(data, RCV_FRAG_LEN);
                put_page(virt_to_page(data));

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
                           int ring_len, int buf_size)
        struct rbdr_entry_t *desc;

        err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
                                     sizeof(struct rbdr_entry_t),
                                     NICVF_RCV_BUF_ALIGN_BYTES);

        rbdr->desc = rbdr->dmem.base;
        /* Buffer size has to be in multiples of 128 bytes */
        rbdr->dma_size = buf_size;
        rbdr->thresh = RBDR_THRESH;

        for (idx = 0; idx < ring_len; idx++) {
                err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
                        /* To free already allocated and mapped ones */
                        rbdr->tail = idx - 1;

                desc = GET_RBDR_DESC(rbdr, idx);
                desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
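                /* The descriptor stores the buffer address right-shifted by
                 * NICVF_RCV_BUF_ALIGN; buffers are allocated at that
                 * alignment so no bits are lost, and the shift is undone
                 * when the ring is torn down in nicvf_free_rbdr().
                 */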
/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        u64 buf_addr, phys_addr;
        struct rbdr_entry_t *desc;

        rbdr->enable = false;
        if (!rbdr->dmem.base)

        /* Release page references */
        while (head != tail) {
                desc = GET_RBDR_DESC(rbdr, head);
                buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
                phys_addr = nicvf_iova_to_phys(nic, buf_addr);
                dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
                                     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
                put_page(virt_to_page(phys_to_virt(phys_addr)));
                head &= (rbdr->dmem.q_len - 1);

        /* Release buffer of tail desc */
        desc = GET_RBDR_DESC(rbdr, tail);
        buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
        phys_addr = nicvf_iova_to_phys(nic, buf_addr);
        dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
                             DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        put_page(virt_to_page(phys_to_virt(phys_addr)));

        nicvf_free_q_desc_mem(nic, &rbdr->dmem);
/* Refill receive buffer descriptors with new buffers. */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
        struct queue_set *qs = nic->qs;
        int rbdr_idx = qs->rbdr_cnt;
        struct rbdr_entry_t *desc;

        rbdr = &qs->rbdr[rbdr_idx];
        /* Check if it's enabled */

        /* Get the number of descriptors to be refilled */
        qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);

        /* The doorbell can be rung with at most ring size minus 1 */
        if (qcount >= (qs->rbdr_len - 1))
        refill_rb_cnt = qs->rbdr_len - qcount - 1;

        /* Start filling descriptors from the tail */
        tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
        while (refill_rb_cnt) {
                tail &= (rbdr->dmem.q_len - 1);
                if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))

                desc = GET_RBDR_DESC(rbdr, tail);
                desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;

        /* make sure all memory stores are done before ringing the doorbell */

        /* Check if buffer allocation failed */
                nic->rb_alloc_fail = true;
                nic->rb_alloc_fail = false;

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,

        /* Re-enable RBDR interrupts only if buffer allocation succeeded */
        if (!nic->rb_alloc_fail && rbdr->enable &&
            netif_running(nic->pnicvf->netdev))
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

/* Allocate receive buffers in non-atomic context for a better chance of success */
void nicvf_rbdr_work(struct work_struct *work)
        struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

        nicvf_refill_rbdr(nic, GFP_KERNEL);
        if (nic->rb_alloc_fail)
                schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
        nic->rb_work_scheduled = false;

/* In softirq context, allocate receive buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
        struct nicvf *nic = (struct nicvf *)data;

        nicvf_refill_rbdr(nic, GFP_ATOMIC);
        if (nic->rb_alloc_fail) {
                nic->rb_work_scheduled = true;
                schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
                                struct cmp_queue *cq, int q_len)
        err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
                                     NICVF_CQ_BASE_ALIGN_BYTES);

        cq->desc = cq->dmem.base;
        cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
        nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
        nicvf_free_q_desc_mem(nic, &cq->dmem);

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
                                struct snd_queue *sq, int q_len)
        err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
                                     NICVF_SQ_BASE_ALIGN_BYTES);

        sq->desc = sq->dmem.base;
        sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
        atomic_set(&sq->free_cnt, q_len - 1);
        sq->thresh = SND_QUEUE_THRESH;

        /* Preallocate memory for TSO segment's header */
        sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
                                          q_len * TSO_HEADER_SIZE,
                                          &sq->tso_hdrs_phys, GFP_KERNEL);

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
                              int hdr_sqe, u8 subdesc_cnt)
        struct sq_gather_subdesc *gather;

        /* Unmap DMA mapped skb data buffers */
        for (idx = 0; idx < subdesc_cnt; idx++) {
                hdr_sqe &= (sq->dmem.q_len - 1);
                gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
                /* HW will ensure data coherency, CPU sync not required */
                dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
                                     gather->size, DMA_TO_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
        struct sq_hdr_subdesc *hdr;
        struct sq_hdr_subdesc *tso_sqe;

        dma_free_coherent(&nic->pdev->dev,
                          sq->dmem.q_len * TSO_HEADER_SIZE,
                          sq->tso_hdrs, sq->tso_hdrs_phys);

        /* Free pending skbs in the queue */
        while (sq->head != sq->tail) {
                skb = (struct sk_buff *)sq->skbuff[sq->head];
                hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
                /* Check for dummy descriptor used for HW TSO offload on 88xx */
                if (hdr->dont_send) {
                        /* Get actual TSO descriptors and unmap them */
                                (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
                        nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
                                                 tso_sqe->subdesc_cnt);
                        nicvf_unmap_sndq_buffers(nic, sq, sq->head,
                dev_kfree_skb_any(skb);
                sq->head &= (sq->dmem.q_len - 1);

        nicvf_free_q_desc_mem(nic, &sq->dmem);
static void nicvf_reclaim_snd_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
        /* Disable send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
        /* Check if SQ is stopped */
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
        union nic_mbx mbx = {};

        /* Make sure all packets in the pipeline are written back into mem */
        mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
        nicvf_send_msg_to_pf(nic, &mbx);

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
        /* Disable timer threshold (doesn't get reset upon CQ reset) */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        /* Disable completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

static void nicvf_reclaim_rbdr(struct nicvf *nic,
                               struct rbdr *rbdr, int qidx)
        /* Save head and tail pointers for freeing up buffers */
        rbdr->head = nicvf_queue_reg_read(nic,
                                          NIC_QSET_RBDR_0_1_HEAD,
        rbdr->tail = nicvf_queue_reg_read(nic,
                                          NIC_QSET_RBDR_0_1_TAIL,

        /* If RBDR FIFO is in 'FAIL' state then do a reset first */
        fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
        if (((fifo_state >> 62) & 0x03) == 0x3)
                nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                                      qidx, NICVF_RBDR_RESET);

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))

        tmp = nicvf_queue_reg_read(nic,
                                   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
        if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
        usleep_range(1000, 2000);

        netdev_err(nic->netdev,
                   "Failed polling on prefetch status\n");

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                              qidx, NICVF_RBDR_RESET);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
        rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

        /* Enable first VLAN stripping */
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                rq_cfg |= (1ULL << 25);
                rq_cfg &= ~(1ULL << 25);
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

        /* Configure Secondary Qsets, if any */
        for (sqs = 0; sqs < nic->sqs_count; sqs++)
                if (nic->snicvf[sqs])
                        nicvf_queue_reg_write(nic->snicvf[sqs],
                                              NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
        union nic_mbx mbx = {};

        /* Reset all RQ/SQ and VF stats */
        mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
        mbx.reset_stat.rx_stat_mask = 0x3FFF;
        mbx.reset_stat.tx_stat_mask = 0x1F;
        mbx.reset_stat.rq_stat_mask = 0xFFFF;
        mbx.reset_stat.sq_stat_mask = 0xFFFF;
        nicvf_send_msg_to_pf(nic, &mbx);
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
                                   int qidx, bool enable)
        union nic_mbx mbx = {};
        struct rcv_queue *rq;
        struct rq_cfg rq_cfg;

        /* Disable receive queue */
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
        nicvf_reclaim_rcv_queue(nic, qs, qidx);

        rq->cq_qs = qs->vnic_id;
        rq->start_rbdr_qs = qs->vnic_id;
        rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
        rq->cont_rbdr_qs = qs->vnic_id;
        rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
        /* All writes of RBDR data are to be loaded into the L2 cache as well */

        /* Send a mailbox msg to PF to config RQ */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
        mbx.rq.qs_num = qs->vnic_id;
        mbx.rq.rq_num = qidx;
        mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
                     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
                     (rq->cont_qs_rbdr_idx << 8) |
                     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
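        /* Field positions in the RQ_CFG word follow directly from the shifts
         * above: caching at bit 26, cq_qs at 19, cq_idx at 16, cont_rbdr_qs
         * at 9, cont_qs_rbdr_idx at 8, start_rbdr_qs at 1 and
         * start_qs_rbdr_idx at 0.
         */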
        nicvf_send_msg_to_pf(nic, &mbx);

        mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
        mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
                     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
        nicvf_send_msg_to_pf(nic, &mbx);

         * Enable CQ drop to reserve sufficient CQEs for all tx packets
        mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
        mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
                     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
                     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
        nicvf_send_msg_to_pf(nic, &mbx);

        if (!nic->sqs_mode && (qidx == 0)) {
                /* Enable checking L3/L4 length and TCP/UDP checksums
                 * Also allow IPv6 pkts with zero UDP checksum.
                 */
                nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
                                      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
                nicvf_config_vlan_stripping(nic, nic->netdev->features);

        /* Enable Receive queue */
        memset(&rq_cfg, 0, sizeof(struct rq_cfg));
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                            int qidx, bool enable)
        struct cmp_queue *cq;
        struct cq_cfg cq_cfg;

        nicvf_reclaim_cmp_queue(nic, qs, qidx);

        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

        spin_lock_init(&cq->lock);
        /* Set completion queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
                              qidx, (u64)(cq->dmem.phys_base));

        /* Enable Completion queue */
        memset(&cq_cfg, 0, sizeof(struct cq_cfg));
        cq_cfg.qsize = ilog2(qs->cq_len >> 10);
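        /* The qsize field encodes the ring length as ilog2(entries / 1024);
         * for example a 4096-entry completion queue is programmed as
         * ilog2(4096 >> 10) = 2.
         */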
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
                              qidx, CMP_QUEUE_TIMER_THRESH);

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
                                   int qidx, bool enable)
        union nic_mbx mbx = {};
        struct snd_queue *sq;
        struct sq_cfg sq_cfg;

        nicvf_reclaim_snd_queue(nic, qs, qidx);

        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

        sq->cq_qs = qs->vnic_id;

        /* Send a mailbox msg to PF to config SQ */
        mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
        mbx.sq.qs_num = qs->vnic_id;
        mbx.sq.sq_num = qidx;
        mbx.sq.sqs_mode = nic->sqs_mode;
        mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
        nicvf_send_msg_to_pf(nic, &mbx);

        /* Set queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
                              qidx, (u64)(sq->dmem.phys_base));

        /* Enable send queue & set queue size */
        memset(&sq_cfg, 0, sizeof(struct sq_cfg));
        sq_cfg.qsize = ilog2(qs->sq_len >> 10);
        sq_cfg.tstmp_bgx_intf = 0;
        /* CQ's level at which HW will stop processing SQEs to avoid
         * transmitting a pkt with no space in CQ to post CQE_TX.
         */
        sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
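        /* cq_limit is expressed in 256ths of the CQ size, so the value above
         * effectively reserves about CMP_QUEUE_PIPELINE_RSVD CQEs for packets
         * already in flight before the SQ is throttled.
         */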
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

        /* Set queue:cpu affinity for better load distribution */
        if (cpu_online(qidx)) {
                cpumask_set_cpu(qidx, &sq->affinity_mask);
                netif_set_xps_queue(nic->netdev,
                                    &sq->affinity_mask, qidx);

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
                              int qidx, bool enable)
        struct rbdr_cfg rbdr_cfg;

        rbdr = &qs->rbdr[qidx];
        nicvf_reclaim_rbdr(nic, rbdr, qidx);

        /* Set descriptor base address */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
                              qidx, (u64)(rbdr->dmem.phys_base));

        /* Enable RBDR & set queue size */
        /* Buffer size should be in multiples of 128 bytes */
        memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
        rbdr_cfg.qsize = RBDR_SIZE;
        rbdr_cfg.avg_con = 0;
        rbdr_cfg.lines = rbdr->dma_size / 128;
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                              qidx, *(u64 *)&rbdr_cfg);

        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
                              qidx, qs->rbdr_len - 1);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
                              qidx, rbdr->thresh - 1);

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
        union nic_mbx mbx = {};
        struct queue_set *qs = nic->qs;
        struct qs_cfg *qs_cfg;

                netdev_warn(nic->netdev,
                            "Qset is still not allocated, don't init queues\n");

        qs->vnic_id = nic->vf_id;

        /* Send a mailbox msg to PF to config Qset */
        mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
        mbx.qs.num = qs->vnic_id;
        mbx.qs.sqs_count = nic->sqs_count;

        qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
        qs_cfg->vnic = qs->vnic_id;

        nicvf_send_msg_to_pf(nic, &mbx);

static void nicvf_free_resources(struct nicvf *nic)
        struct queue_set *qs = nic->qs;

        /* Free receive buffer descriptor ring */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

        /* Free completion queue */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

        /* Free send queue */
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_free_snd_queue(nic, &qs->sq[qidx]);

static int nicvf_alloc_resources(struct nicvf *nic)
        struct queue_set *qs = nic->qs;

        /* Alloc receive buffer descriptor ring */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,

        /* Alloc send queue */
        for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
                if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))

        /* Alloc completion queue */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))

        nicvf_free_resources(nic);

int nicvf_set_qset_resources(struct nicvf *nic)
        struct queue_set *qs;

        qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);

        /* Set count of each queue */
        qs->rbdr_cnt = DEFAULT_RBDR_CNT;
        qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
        qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
        qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);

        /* Set queue lengths */
        qs->rbdr_len = RCV_BUF_COUNT;
        qs->sq_len = SND_QUEUE_LEN;
        qs->cq_len = CMP_QUEUE_LEN;

        nic->rx_queues = qs->rq_cnt;
        nic->tx_queues = qs->sq_cnt;

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
        bool disable = false;
        struct queue_set *qs = nic->qs;
        struct queue_set *pqs = nic->pnicvf->qs;

        /* Take primary VF's queue lengths.
         * This is needed to take queue lengths set from ethtool
         * into consideration.
         */
        if (nic->sqs_mode && pqs) {
                qs->cq_len = pqs->cq_len;
                qs->sq_len = pqs->sq_len;

        if (nicvf_alloc_resources(nic))

        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_snd_queue_config(nic, qs, qidx, enable);
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_cmp_queue_config(nic, qs, qidx, enable);
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_rbdr_config(nic, qs, qidx, enable);
        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                nicvf_rcv_queue_config(nic, qs, qidx, enable);

        for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                nicvf_rcv_queue_config(nic, qs, qidx, disable);
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_rbdr_config(nic, qs, qidx, disable);
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_snd_queue_config(nic, qs, qidx, disable);
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_cmp_queue_config(nic, qs, qidx, disable);

        nicvf_free_resources(nic);

        /* Reset RXQ's stats.
         * SQ's stats will get reset automatically once SQ is reset.
         */
        nicvf_reset_rcv_queue_stats(nic);
/* Get a free descriptor from the SQ;
 * returns the descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
        atomic_sub(desc_cnt, &sq->free_cnt);
        sq->tail += desc_cnt;
        sq->tail &= (sq->dmem.q_len - 1);
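        /* Queue lengths are powers of two, so masking with (q_len - 1) wraps
         * the ring index cheaply; e.g. with q_len = 1024 a tail that has
         * advanced to 1025 wraps around to 1.
         */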
/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
                                          int qentry, int desc_cnt)
        atomic_add(desc_cnt, &sq->free_cnt);

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
        atomic_add(desc_cnt, &sq->free_cnt);
        sq->head += desc_cnt;
        sq->head &= (sq->dmem.q_len - 1);

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
        qentry &= (sq->dmem.q_len - 1);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
        sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
        sq_cfg |= NICVF_SQ_EN;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
        /* Ring doorbell so that H/W restarts processing SQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

void nicvf_sq_disable(struct nicvf *nic, int qidx)
        sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
        sq_cfg &= ~NICVF_SQ_EN;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
        struct nicvf *nic = netdev_priv(netdev);
        struct sq_hdr_subdesc *hdr;

        head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
        tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
        while (sq->head != head) {
                hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
                if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
                        nicvf_put_sq_desc(sq, 1);
                skb = (struct sk_buff *)sq->skbuff[sq->head];
                        dev_kfree_skb_any(skb);
                atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
                atomic64_add(hdr->tot_len,
                             (atomic64_t *)&netdev->stats.tx_bytes);
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from the 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
        struct skb_shared_info *sh = skb_shinfo(skb);
        unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        unsigned int data_len = skb->len - sh_len;
        unsigned int p_len = sh->gso_size;
        long f_id = -1;  /* id of the current fragment */
        long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
        long f_used = 0;  /* bytes used from the current fragment */
        long n;  /* size of the current piece of payload */

        for (segment = 0; segment < sh->gso_segs; segment++) {
                unsigned int p_used = 0;

                /* One edesc for the header and for each piece of the payload. */
                for (num_edescs++; p_used < p_len; num_edescs++) {
                        /* Advance as needed. */
                        while (f_used >= f_size) {
                                f_size = skb_frag_size(&sh->frags[f_id]);

                        /* Use bytes from the current fragment. */
                        if (n > f_size - f_used)
                                n = f_size - f_used;

                /* The last segment may be less than gso_size. */
                if (data_len < p_len)

        /* '+ gso_segs' for the SQ_HDR_SUBDESC of each segment */
        return num_edescs + sh->gso_segs;
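/* Worked example of the count above: a TSO skb with a 66-byte protocol
 * header, 3000 bytes of linear payload, gso_size = 1500 and no page frags
 * is split into 2 segments; each segment needs one edesc for the header
 * and one for its payload piece (4 edescs), plus one SQ_HDR subdesc per
 * segment, for a total of 4 + 2 = 6 subdescriptors.
 */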
#define POST_CQE_DESC_COUNT 2

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
        int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

        if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
                subdesc_cnt = nicvf_tso_count_subdescs(skb);

        /* Dummy descriptors to get TSO pkt completion notification */
        if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
                subdesc_cnt += POST_CQE_DESC_COUNT;

        if (skb_shinfo(skb)->nr_frags)
                subdesc_cnt += skb_shinfo(skb)->nr_frags;
/* Add an SQ HEADER subdescriptor.
 * It is the first subdescriptor of every send descriptor.
 */
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                         int subdesc_cnt, struct sk_buff *skb, int len)
        struct sq_hdr_subdesc *hdr;

        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;

        if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
                /* post_cqe = 0, to avoid HW posting a CQE for every TSO
                 * segment transmitted on 88xx.
                 */
                hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
                sq->skbuff[qentry] = (u64)skb;
                /* Enable notification via CQE after processing SQE */
                /* Number of subdescriptors following this one */
                hdr->subdesc_cnt = subdesc_cnt;

        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);

                proto = ip_hdr(skb)->protocol;
                        hdr->csum_l4 = SEND_L4_CSUM_TCP;
                        hdr->csum_l4 = SEND_L4_CSUM_UDP;
                        hdr->csum_l4 = SEND_L4_CSUM_SCTP;

        if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
                hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
                hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
                /* For non-tunneled pkts, point this to the L2 ethertype */
                hdr->inner_l3_offset = skb_network_offset(skb) - 2;
                this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
        struct sq_gather_subdesc *gather;

        qentry &= (sq->dmem.q_len - 1);
        gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

        memset(gather, 0, SND_QUEUE_DESC_SIZE);
        gather->subdesc_type = SQ_DESC_TYPE_GATHER;
        gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
        gather->size = size;
        gather->addr = data;
/* Add HDR + IMMEDIATE subdescriptors right after the descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
                                            int tso_sqe, struct sk_buff *skb)
        struct sq_imm_subdesc *imm;
        struct sq_hdr_subdesc *hdr;

        sq->skbuff[qentry] = (u64)skb;

        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
        /* Enable notification via CQE after processing SQE */
        /* There is no packet to transmit here */
        hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
        /* Actual TSO header SQE index, needed for cleanup */
        hdr->rsvd2 = tso_sqe;

        qentry = nicvf_get_nxt_sqentry(sq, qentry);
        imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
        memset(imm, 0, SND_QUEUE_DESC_SIZE);
        imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
                                     int sq_num, int desc_cnt)
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(nic->pnicvf->netdev,
                                  skb_get_queue_mapping(skb));

        netdev_tx_sent_queue(txq, skb->len);

        /* make sure all memory stores are done before ringing doorbell */

        /* Inform HW to xmit all TSO segments */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
/* Segment a TSO packet into 'gso_size' sized segments and append
 * them to the SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
                               int sq_num, int qentry, struct sk_buff *skb)
        int seg_subdescs = 0, desc_cnt = 0;
        int seg_len, total_len, data_left;
        int hdr_qentry = qentry;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

        tso_start(skb, &tso);
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
                /* Save Qentry for adding HDR_SUBDESC at the end */
                hdr_qentry = qentry;

                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
                total_len -= data_left;

                /* Add segment's header */
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
                                            qentry * TSO_HEADER_SIZE);
                /* HDR_SUBDESC + GATHER */

                /* Add segment's payload fragments */
                while (data_left > 0) {
                        size = min_t(int, tso.size, data_left);

                        qentry = nicvf_get_nxt_sqentry(sq, qentry);
                        nicvf_sq_add_gather_subdesc(sq, qentry, size,
                                                    virt_to_phys(tso.data));

                        tso_build_data(skb, &tso, size);

                nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
                                         seg_subdescs - 1, skb, seg_len);
                sq->skbuff[hdr_qentry] = (u64)NULL;
                qentry = nicvf_get_nxt_sqentry(sq, qentry);

                desc_cnt += seg_subdescs;

        /* Save SKB in the last segment for freeing */
        sq->skbuff[hdr_qentry] = (u64)skb;

        nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);

        this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
                        struct sk_buff *skb, u8 sq_num)
        int subdesc_cnt, hdr_sqe = 0;

        subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
        if (subdesc_cnt > atomic_read(&sq->free_cnt))

        qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

        /* Check if it's a TSO packet */
        if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
                return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,

        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
        size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        /* HW will ensure data coherency, CPU sync not required */
        dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
                                      offset_in_page(skb->data), size,
                                      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
                nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);

        nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

        /* Check for scattered buffer */
        if (!skb_is_nonlinear(skb))

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[i];

                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                size = skb_frag_size(frag);
                dma_addr = dma_map_page_attrs(&nic->pdev->dev,
                                              skb_frag_page(frag),
                                              frag->page_offset, size,
                                              DMA_ATTR_SKIP_CPU_SYNC);
                if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
                        /* Free the entire chain of mapped buffers;
                         * here 'i' = frags mapped + the skb->data mapped above
                         */
                        nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
                        nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);

                nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

        if (nic->t88 && skb_shinfo(skb)->gso_size) {
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);

        nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
        /* Use original PCI dev for debug log */
        netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");

static inline unsigned frag_num(unsigned i)
        return (i & ~3) + 3 - (i & 3);
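/* rb_lens[] holds 16-bit lengths packed four to a 64-bit word; the mapping
 * above reverses the position within each group of four (e.g. indices
 * 0,1,2,3,4 map to 3,2,1,0,7), which appears to compensate for the byte
 * order in which the hardware wrote the length words.
 */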
/* Returns the SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
        int payload_len = 0;
        struct sk_buff *skb = NULL;
        u16 *rb_lens = NULL;
        u64 *rb_ptrs = NULL;

        rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
        /* Except on 88xx pass1, CQE_RX2_S is added to CQE_RX at word 6 on
         * all chips, hence the buffer pointers move by one word.
         * Use the existing 'hw_tso' flag, which is set for all chips except
         * 88xx pass1, instead of taking an additional cache line access
         * (or miss) by reading the pci dev's revision.
         */
        rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
        rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

        for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
                payload_len = rb_lens[frag_num(frag)];
                phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
                        dev_kfree_skb_any(skb);

                /* First fragment */
                dma_unmap_page_attrs(&nic->pdev->dev,
                                     *rb_ptrs - cqe_rx->align_pad,
                                     RCV_FRAG_LEN, DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
                skb = nicvf_rb_ptr_to_skb(nic,
                                          phys_addr - cqe_rx->align_pad,
                skb_reserve(skb, cqe_rx->align_pad);
                skb_put(skb, payload_len);

                dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
                                     RCV_FRAG_LEN, DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
                page = virt_to_page(phys_to_virt(phys_addr));
                offset = phys_to_virt(phys_addr) - page_address(page);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                offset, payload_len, RCV_FRAG_LEN);

                /* Next buffer pointer */
static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
                reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
                reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
        case NICVF_INTR_RBDR:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
        case NICVF_INTR_PKT_DROP:
                reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
        case NICVF_INTR_TCP_TIMER:
                reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
        case NICVF_INTR_MBOX:
                reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
        case NICVF_INTR_QS_ERR:
                reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

                netdev_dbg(nic->netdev,
                           "Failed to enable interrupt: unknown type\n");

        nicvf_reg_write(nic, NIC_VF_ENA_W1S,
                        nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

                netdev_dbg(nic->netdev,
                           "Failed to disable interrupt: unknown type\n");

        nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

                netdev_dbg(nic->netdev,
                           "Failed to clear interrupt: unknown type\n");

        nicvf_reg_write(nic, NIC_VF_INT, mask);

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
        /* If the interrupt type is unknown, treat it as disabled. */
                netdev_dbg(nic->netdev,
                           "Failed to check interrupt enable: unknown type\n");

        return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
        struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
        nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
                       (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
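/* Each RQ has its own bank of 64-bit statistics registers: the queue index
 * selects the bank via NIC_Q_NUM_SHIFT and 'reg' selects the 8-byte word
 * within it (the octet and packet counters read below).
 */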
        rq = &nic->qs->rq[rq_idx];
        rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
        rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
        struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
        nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
                       (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

        sq = &nic->qs->sq[sq_idx];
        sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
        sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
        if (!cqe_rx->err_level && !cqe_rx->err_opcode)

        if (netif_msg_rx_err(nic))
                netdev_err(nic->netdev,
                           "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
                           cqe_rx->err_level, cqe_rx->err_opcode);

        switch (cqe_rx->err_opcode) {
        case CQ_RX_ERROP_RE_PARTIAL:
                this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
        case CQ_RX_ERROP_RE_JABBER:
                this_cpu_inc(nic->drv_stats->rx_jabber_errs);
        case CQ_RX_ERROP_RE_FCS:
                this_cpu_inc(nic->drv_stats->rx_fcs_errs);
        case CQ_RX_ERROP_RE_RX_CTL:
                this_cpu_inc(nic->drv_stats->rx_bgx_errs);
        case CQ_RX_ERROP_PREL2_ERR:
                this_cpu_inc(nic->drv_stats->rx_prel2_errs);
        case CQ_RX_ERROP_L2_MAL:
                this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
        case CQ_RX_ERROP_L2_OVERSIZE:
                this_cpu_inc(nic->drv_stats->rx_oversize);
        case CQ_RX_ERROP_L2_UNDERSIZE:
                this_cpu_inc(nic->drv_stats->rx_undersize);
        case CQ_RX_ERROP_L2_LENMISM:
                this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
        case CQ_RX_ERROP_L2_PCLP:
                this_cpu_inc(nic->drv_stats->rx_l2_pclp);
        case CQ_RX_ERROP_IP_NOT:
                this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
        case CQ_RX_ERROP_IP_CSUM_ERR:
                this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
        case CQ_RX_ERROP_IP_MAL:
                this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
        case CQ_RX_ERROP_IP_MALD:
                this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
        case CQ_RX_ERROP_IP_HOP:
                this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
        case CQ_RX_ERROP_L3_PCLP:
                this_cpu_inc(nic->drv_stats->rx_l3_pclp);
        case CQ_RX_ERROP_L4_MAL:
                this_cpu_inc(nic->drv_stats->rx_l4_malformed);
        case CQ_RX_ERROP_L4_CHK:
                this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
        case CQ_RX_ERROP_UDP_LEN:
                this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
        case CQ_RX_ERROP_L4_PORT:
                this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
        case CQ_RX_ERROP_TCP_FLAG:
                this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
        case CQ_RX_ERROP_TCP_OFFSET:
                this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
        case CQ_RX_ERROP_L4_PCLP:
                this_cpu_inc(nic->drv_stats->rx_l4_pclp);
        case CQ_RX_ERROP_RBDR_TRUNC:
                this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
        switch (cqe_tx->send_status) {
        case CQ_TX_ERROP_GOOD:
        case CQ_TX_ERROP_DESC_FAULT:
                this_cpu_inc(nic->drv_stats->tx_desc_fault);
        case CQ_TX_ERROP_HDR_CONS_ERR:
                this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
        case CQ_TX_ERROP_SUBDC_ERR:
                this_cpu_inc(nic->drv_stats->tx_subdesc_err);
        case CQ_TX_ERROP_MAX_SIZE_VIOL:
                this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
        case CQ_TX_ERROP_IMM_SIZE_OFLOW:
                this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
        case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
                this_cpu_inc(nic->drv_stats->tx_data_seq_err);
        case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
                this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
        case CQ_TX_ERROP_LOCK_VIOL:
                this_cpu_inc(nic->drv_stats->tx_lock_viol);
        case CQ_TX_ERROP_DATA_FAULT:
                this_cpu_inc(nic->drv_stats->tx_data_fault);
        case CQ_TX_ERROP_TSTMP_CONFLICT:
                this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
        case CQ_TX_ERROP_TSTMP_TIMEOUT:
                this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
        case CQ_TX_ERROP_MEM_FAULT:
                this_cpu_inc(nic->drv_stats->tx_mem_fault);
        case CQ_TX_ERROP_CK_OVERLAP:
                this_cpu_inc(nic->drv_stats->tx_csum_overlap);
        case CQ_TX_ERROP_CK_OFLOW:
                this_cpu_inc(nic->drv_stats->tx_csum_overflow);