2 * Copyright (C) 2015 Cavium, Inc.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
10 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
19 #include "nicvf_queues.h"
27 #define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
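/* Receive buffer layout, as set up by nicvf_alloc_rcv_buffer() below:
 *
 *   [ struct rbuf_info | pad to NICVF_RCV_BUF_ALIGN_BYTES | packet data ]
 *   ^ cache-aligned start                                  ^ address handed to HW
 *
 * GET_RBUF_INFO() steps back NICVF_RCV_BUF_ALIGN_BYTES from the HW-visible
 * data address to reach the rbuf_info saved just in front of it.
 */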
29 /* Poll a register for a specific value */
30 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
31 u64 reg, int bit_pos, int bits, int val)
37 bit_mask = (1ULL << bits) - 1;
38 bit_mask = (bit_mask << bit_pos);
41 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
42 if (((reg_val & bit_mask) >> bit_pos) == val)
44 usleep_range(1000, 2000);
47 netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
51 /* Allocate memory for a queue's descriptors */
52 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
53 int q_len, int desc_size, int align_bytes)
56 dmem->size = (desc_size * q_len) + align_bytes;
57 /* Save address, need it while freeing */
58 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
59 &dmem->dma, GFP_KERNEL);
60 if (!dmem->unalign_base)
64 /* Align the memory address to 'align_bytes' */
64 dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
65 dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
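/* The extra 'align_bytes' in the allocation lets the descriptor area start
 * on an 'align_bytes' boundary: phys_base/base are the aligned DMA/CPU
 * addresses used for the descriptors, while dma/unalign_base keep the
 * original addresses needed later by dma_free_coherent().
 */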
69 /* Free queue's descriptor memory */
70 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
75 dma_free_coherent(&nic->pdev->dev, dmem->size,
76 dmem->unalign_base, dmem->dma);
77 dmem->unalign_base = NULL;
81 /* Allocate buffer for packet reception
82 * HW returns the memory address where the packet is DMA'ed, not a pointer
83 * into the RBDR ring, so save the buffer address at the start of the fragment
84 * and align that start address to a cache-aligned address
86 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
87 u32 buf_len, u64 **rbuf)
90 struct rbuf_info *rinfo;
91 int order = get_order(buf_len);
93 /* Check if the request can be accommodated in the previously allocated page */
95 if ((nic->rb_page_offset + buf_len + buf_len) >
96 (PAGE_SIZE << order)) {
99 nic->rb_page_offset += buf_len;
100 get_page(nic->rb_page);
104 /* Allocate a new page */
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
113 nic->rb_page_offset = 0;
116 data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
118 /* Align buffer address to a cache line, i.e. 128 bytes */
119 rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
120 /* Save page address for reference count updates */
121 rinfo->page = nic->rb_page;
122 /* Store start address for later retrieval */
123 rinfo->data = (void *)data;
124 /* Store alignment offset */
125 rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
127 data += rinfo->offset;
129 /* Give next aligned address to hw for DMA */
130 *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
134 /* Retrieve actual buffer start address and build skb for received packet */
135 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
139 struct rbuf_info *rinfo;
141 rb_ptr = (u64)phys_to_virt(rb_ptr);
142 /* Get buffer start address and alignment offset */
143 rinfo = GET_RBUF_INFO(rb_ptr);
145 /* Now build an skb to give to stack */
146 skb = build_skb(rinfo->data, RCV_FRAG_LEN);
148 put_page(rinfo->page);
152 /* Set correct skb->data */
153 skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
155 prefetch((void *)rb_ptr);
159 /* Allocate RBDR ring and populate receive buffers */
160 static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
161 int ring_len, int buf_size)
165 struct rbdr_entry_t *desc;
168 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
169 sizeof(struct rbdr_entry_t),
170 NICVF_RCV_BUF_ALIGN_BYTES);
174 rbdr->desc = rbdr->dmem.base;
175 /* Buffer size has to be in multiples of 128 bytes */
176 rbdr->dma_size = buf_size;
178 rbdr->thresh = RBDR_THRESH;
181 for (idx = 0; idx < ring_len; idx++) {
182 err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
187 desc = GET_RBDR_DESC(rbdr, idx);
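/* Buffers are NICVF_RCV_BUF_ALIGN_BYTES aligned, so the low bits of the
 * physical address are zero and the descriptor stores the address shifted
 * right by NICVF_RCV_BUF_ALIGN; nicvf_free_rbdr() shifts it back when
 * reclaiming the buffers.
 */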
188 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
193 /* Free RBDR ring and its receive buffers */
194 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
198 struct rbdr_entry_t *desc;
199 struct rbuf_info *rinfo;
204 rbdr->enable = false;
205 if (!rbdr->dmem.base)
212 while (head != tail) {
213 desc = GET_RBDR_DESC(rbdr, head);
214 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
215 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
216 put_page(rinfo->page);
218 head &= (rbdr->dmem.q_len - 1);
220 /* Release the receive buffer of the tail desc */
221 desc = GET_RBDR_DESC(rbdr, tail);
222 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
223 rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
224 put_page(rinfo->page);
227 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
230 /* Refill receive buffer descriptors with new buffers.
232 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
234 struct queue_set *qs = nic->qs;
235 int rbdr_idx = qs->rbdr_cnt;
239 struct rbdr_entry_t *desc;
247 rbdr = &qs->rbdr[rbdr_idx];
248 /* Check if it's enabled */
252 /* Get the number of descriptors to be refilled */
253 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
255 /* The doorbell can be rung with at most ring size minus 1 */
256 if (qcount >= (qs->rbdr_len - 1))
259 refill_rb_cnt = qs->rbdr_len - qcount - 1;
261 /* Start filling descs from tail */
262 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
263 while (refill_rb_cnt) {
265 tail &= (rbdr->dmem.q_len - 1);
267 if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
270 desc = GET_RBDR_DESC(rbdr, tail);
271 desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
276 /* make sure all memory stores are done before ringing doorbell */
279 /* Check if buffer allocation failed */
281 nic->rb_alloc_fail = true;
283 nic->rb_alloc_fail = false;
286 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
289 /* Re-enable RBDR interrupts only if buffer allocation succeeded */
290 if (!nic->rb_alloc_fail && rbdr->enable)
291 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
297 /* Allocate rcv buffers in non-atomic context for a better chance of success */
298 void nicvf_rbdr_work(struct work_struct *work)
300 struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
302 nicvf_refill_rbdr(nic, GFP_KERNEL);
303 if (nic->rb_alloc_fail)
304 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
306 nic->rb_work_scheduled = false;
309 /* In Softirq context, alloc rcv buffers in atomic mode */
310 void nicvf_rbdr_task(unsigned long data)
312 struct nicvf *nic = (struct nicvf *)data;
314 nicvf_refill_rbdr(nic, GFP_ATOMIC);
315 if (nic->rb_alloc_fail) {
316 nic->rb_work_scheduled = true;
317 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
321 /* Initialize completion queue */
322 static int nicvf_init_cmp_queue(struct nicvf *nic,
323 struct cmp_queue *cq, int q_len)
327 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
328 NICVF_CQ_BASE_ALIGN_BYTES);
332 cq->desc = cq->dmem.base;
333 cq->thresh = CMP_QUEUE_CQE_THRESH;
334 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
339 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
346 nicvf_free_q_desc_mem(nic, &cq->dmem);
349 /* Initialize transmit queue */
350 static int nicvf_init_snd_queue(struct nicvf *nic,
351 struct snd_queue *sq, int q_len)
355 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
356 NICVF_SQ_BASE_ALIGN_BYTES);
360 sq->desc = sq->dmem.base;
361 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
366 atomic_set(&sq->free_cnt, q_len - 1);
367 sq->thresh = SND_QUEUE_THRESH;
369 /* Preallocate memory for TSO segment headers */
370 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
371 q_len * TSO_HEADER_SIZE,
372 &sq->tso_hdrs_phys, GFP_KERNEL);
379 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
389 sq->tso_hdrs, sq->tso_hdrs_phys);
392 nicvf_free_q_desc_mem(nic, &sq->dmem);
395 static void nicvf_reclaim_snd_queue(struct nicvf *nic,
396 struct queue_set *qs, int qidx)
398 /* Disable send queue */
399 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
400 /* Check if SQ is stopped */
401 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
403 /* Reset send queue */
404 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
407 static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
408 struct queue_set *qs, int qidx)
410 union nic_mbx mbx = {};
412 /* Make sure all packets in the pipeline are written back into mem */
413 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
414 nicvf_send_msg_to_pf(nic, &mbx);
417 static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
418 struct queue_set *qs, int qidx)
420 /* Disable timer threshold (doesn't get reset upon CQ reset) */
421 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
422 /* Disable completion queue */
423 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
424 /* Reset completion queue */
425 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
428 static void nicvf_reclaim_rbdr(struct nicvf *nic,
429 struct rbdr *rbdr, int qidx)
434 /* Save head and tail pointers for freeing up buffers */
435 rbdr->head = nicvf_queue_reg_read(nic,
436 NIC_QSET_RBDR_0_1_HEAD,
438 rbdr->tail = nicvf_queue_reg_read(nic,
439 NIC_QSET_RBDR_0_1_TAIL,
442 /* If the RBDR FIFO is in 'FAIL' state, do a reset first
445 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
446 if (((fifo_state >> 62) & 0x03) == 0x3)
447 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
448 qidx, NICVF_RBDR_RESET);
451 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
452 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
455 tmp = nicvf_queue_reg_read(nic,
456 NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
458 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
460 usleep_range(1000, 2000);
463 netdev_err(nic->netdev,
464 "Failed polling on prefetch status\n");
468 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
469 qidx, NICVF_RBDR_RESET);
471 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
473 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
474 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
478 /* Configures receive queue */
479 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
480 int qidx, bool enable)
482 union nic_mbx mbx = {};
483 struct rcv_queue *rq;
484 struct rq_cfg rq_cfg;
489 /* Disable receive queue */
490 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
493 nicvf_reclaim_rcv_queue(nic, qs, qidx);
497 rq->cq_qs = qs->vnic_id;
499 rq->start_rbdr_qs = qs->vnic_id;
500 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
501 rq->cont_rbdr_qs = qs->vnic_id;
502 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
503 /* All writes of RBDR data are to be loaded into the L2 cache as well */
506 /* Send a mailbox msg to PF to config RQ */
507 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
508 mbx.rq.qs_num = qs->vnic_id;
509 mbx.rq.rq_num = qidx;
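/* RQ_CFG word as assembled below (field positions follow the shifts used
 * here): caching starts at bit 26, cq_qs at bit 19, cq_idx at bit 16,
 * cont_rbdr_qs at bit 9, cont_qs_rbdr_idx at bit 8, start_rbdr_qs at bit 1
 * and start_qs_rbdr_idx at bit 0.
 */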
510 mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
511 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
512 (rq->cont_qs_rbdr_idx << 8) |
513 (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
514 nicvf_send_msg_to_pf(nic, &mbx);
516 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
517 mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
518 nicvf_send_msg_to_pf(nic, &mbx);
521 /* Enable CQ drop to reserve sufficient CQEs for all tx packets */
523 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
524 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
525 nicvf_send_msg_to_pf(nic, &mbx);
527 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
529 /* Enable Receive queue */
532 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
535 /* Configures completion queue */
536 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
537 int qidx, bool enable)
539 struct cmp_queue *cq;
540 struct cq_cfg cq_cfg;
546 nicvf_reclaim_cmp_queue(nic, qs, qidx);
550 /* Reset completion queue */
551 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
556 spin_lock_init(&cq->lock);
557 /* Set completion queue base address */
558 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
559 qidx, (u64)(cq->dmem.phys_base));
561 /* Enable Completion queue */
565 cq_cfg.qsize = CMP_QSIZE;
567 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
569 /* Set threshold value for interrupt generation */
570 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
571 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
572 qidx, nic->cq_coalesce_usecs);
575 /* Configures transmit queue */
576 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
577 int qidx, bool enable)
579 union nic_mbx mbx = {};
580 struct snd_queue *sq;
581 struct sq_cfg sq_cfg;
587 nicvf_reclaim_snd_queue(nic, qs, qidx);
591 /* Reset send queue */
592 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
594 sq->cq_qs = qs->vnic_id;
597 /* Send a mailbox msg to PF to config SQ */
598 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
599 mbx.sq.qs_num = qs->vnic_id;
600 mbx.sq.sq_num = qidx;
601 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
602 nicvf_send_msg_to_pf(nic, &mbx);
604 /* Set queue base address */
605 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
606 qidx, (u64)(sq->dmem.phys_base));
608 /* Enable send queue & set queue size */
612 sq_cfg.qsize = SND_QSIZE;
613 sq_cfg.tstmp_bgx_intf = 0;
614 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
616 /* Set threshold value for interrupt generation */
617 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
619 /* Set queue:cpu affinity for better load distribution */
620 if (cpu_online(qidx)) {
621 cpumask_set_cpu(qidx, &sq->affinity_mask);
622 netif_set_xps_queue(nic->netdev,
623 &sq->affinity_mask, qidx);
627 /* Configures receive buffer descriptor ring */
628 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
629 int qidx, bool enable)
632 struct rbdr_cfg rbdr_cfg;
634 rbdr = &qs->rbdr[qidx];
635 nicvf_reclaim_rbdr(nic, rbdr, qidx);
639 /* Set descriptor base address */
640 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
641 qidx, (u64)(rbdr->dmem.phys_base));
643 /* Enable RBDR & set queue size */
644 /* Buffer size should be in multiples of 128 bytes */
648 rbdr_cfg.qsize = RBDR_SIZE;
649 rbdr_cfg.avg_con = 0;
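/* The hardware takes the receive buffer size in units of 128-byte cache
 * lines, which is why dma_size must be a multiple of 128 bytes (see
 * nicvf_init_rbdr() above).
 */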
650 rbdr_cfg.lines = rbdr->dma_size / 128;
651 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
652 qidx, *(u64 *)&rbdr_cfg);
655 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
656 qidx, qs->rbdr_len - 1);
658 /* Set threshold value for interrupt generation */
659 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
660 qidx, rbdr->thresh - 1);
663 /* Requests PF to assign and enable Qset */
664 void nicvf_qset_config(struct nicvf *nic, bool enable)
666 union nic_mbx mbx = {};
667 struct queue_set *qs = nic->qs;
668 struct qs_cfg *qs_cfg;
671 netdev_warn(nic->netdev,
672 "Qset is still not allocated, don't init queues\n");
677 qs->vnic_id = nic->vf_id;
679 /* Send a mailbox msg to PF to config Qset */
680 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
681 mbx.qs.num = qs->vnic_id;
684 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
690 qs_cfg->vnic = qs->vnic_id;
692 nicvf_send_msg_to_pf(nic, &mbx);
695 static void nicvf_free_resources(struct nicvf *nic)
698 struct queue_set *qs = nic->qs;
700 /* Free receive buffer descriptor ring */
701 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
702 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
704 /* Free completion queue */
705 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
706 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
708 /* Free send queue */
709 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
710 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
713 static int nicvf_alloc_resources(struct nicvf *nic)
716 struct queue_set *qs = nic->qs;
718 /* Alloc receive buffer descriptor ring */
719 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
720 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
725 /* Alloc send queue */
726 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
727 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
731 /* Alloc completion queue */
732 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
733 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
739 nicvf_free_resources(nic);
743 int nicvf_set_qset_resources(struct nicvf *nic)
745 struct queue_set *qs;
747 qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
752 /* Set count of each queue */
753 qs->rbdr_cnt = RBDR_CNT;
754 qs->rq_cnt = RCV_QUEUE_CNT;
755 qs->sq_cnt = SND_QUEUE_CNT;
756 qs->cq_cnt = CMP_QUEUE_CNT;
758 /* Set queue lengths */
759 qs->rbdr_len = RCV_BUF_COUNT;
760 qs->sq_len = SND_QUEUE_LEN;
761 qs->cq_len = CMP_QUEUE_LEN;
765 int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
767 bool disable = false;
768 struct queue_set *qs = nic->qs;
775 if (nicvf_alloc_resources(nic))
778 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
779 nicvf_snd_queue_config(nic, qs, qidx, enable);
780 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
781 nicvf_cmp_queue_config(nic, qs, qidx, enable);
782 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
783 nicvf_rbdr_config(nic, qs, qidx, enable);
784 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
785 nicvf_rcv_queue_config(nic, qs, qidx, enable);
787 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
788 nicvf_rcv_queue_config(nic, qs, qidx, disable);
789 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
790 nicvf_rbdr_config(nic, qs, qidx, disable);
791 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
792 nicvf_snd_queue_config(nic, qs, qidx, disable);
793 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
794 nicvf_cmp_queue_config(nic, qs, qidx, disable);
796 nicvf_free_resources(nic);
802 /* Get a free desc from SQ
803 * returns the descriptor pointer & descriptor number
805 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
810 atomic_sub(desc_cnt, &sq->free_cnt);
811 sq->tail += desc_cnt;
812 sq->tail &= (sq->dmem.q_len - 1);
817 /* Free descriptor back to SQ for future use */
818 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
820 atomic_add(desc_cnt, &sq->free_cnt);
821 sq->head += desc_cnt;
822 sq->head &= (sq->dmem.q_len - 1);
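/* SQ descriptor accounting: free_cnt starts at q_len - 1 (see
 * nicvf_init_snd_queue()), nicvf_get_sq_desc() consumes descriptors and
 * advances 'tail', nicvf_put_sq_desc() returns them and advances 'head'.
 * Masking with (q_len - 1) assumes the queue length is a power of two.
 */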
825 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
828 qentry &= (sq->dmem.q_len - 1);
832 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
836 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
837 sq_cfg |= NICVF_SQ_EN;
838 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
839 /* Ring doorbell so that H/W restarts processing SQEs */
840 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
843 void nicvf_sq_disable(struct nicvf *nic, int qidx)
847 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
848 sq_cfg &= ~NICVF_SQ_EN;
849 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
852 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
857 struct nicvf *nic = netdev_priv(netdev);
858 struct sq_hdr_subdesc *hdr;
860 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
861 tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
862 while (sq->head != head) {
863 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
864 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
865 nicvf_put_sq_desc(sq, 1);
868 skb = (struct sk_buff *)sq->skbuff[sq->head];
870 dev_kfree_skb_any(skb);
871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
872 atomic64_add(hdr->tot_len,
873 (atomic64_t *)&netdev->stats.tx_bytes);
874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
878 /* Calculate the number of SQ subdescriptors needed to transmit all
879 * segments of this TSO packet.
880 * Taken from 'Tilera network driver' with a minor modification.
882 static int nicvf_tso_count_subdescs(struct sk_buff *skb)
884 struct skb_shared_info *sh = skb_shinfo(skb);
885 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
886 unsigned int data_len = skb->len - sh_len;
887 unsigned int p_len = sh->gso_size;
888 long f_id = -1; /* id of the current fragment */
889 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
890 long f_used = 0; /* bytes used from the current fragment */
891 long n; /* size of the current piece of payload */
895 for (segment = 0; segment < sh->gso_segs; segment++) {
896 unsigned int p_used = 0;
898 /* One edesc for header and for each piece of the payload. */
899 for (num_edescs++; p_used < p_len; num_edescs++) {
900 /* Advance as needed. */
901 while (f_used >= f_size) {
903 f_size = skb_frag_size(&sh->frags[f_id]);
907 /* Use bytes from the current fragment. */
909 if (n > f_size - f_used)
915 /* The last segment may be less than gso_size. */
917 if (data_len < p_len)
921 /* '+ gso_segs' for the SQ_HDR_SUBDESCs, one per segment */
922 return num_edescs + sh->gso_segs;
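/* Worked example: a TSO skb with gso_segs = 3 where each segment's payload
 * comes from a single fragment piece needs one edesc for the header plus
 * one for the payload per segment (6 in total), and '+ gso_segs' adds the
 * three SQ_HDR subdescriptors, giving 9 subdescriptors overall.
 */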
925 /* Get the number of SQ descriptors needed to xmit this skb */
926 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
928 int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
930 if (skb_shinfo(skb)->gso_size) {
931 subdesc_cnt = nicvf_tso_count_subdescs(skb);
935 if (skb_shinfo(skb)->nr_frags)
936 subdesc_cnt += skb_shinfo(skb)->nr_frags;
941 /* Add SQ HEADER subdescriptor.
942 * First subdescriptor for every send descriptor.
945 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
946 int subdesc_cnt, struct sk_buff *skb, int len)
949 struct sq_hdr_subdesc *hdr;
951 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
952 sq->skbuff[qentry] = (u64)skb;
954 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
955 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
956 /* Enable notification via CQE after processing SQE */
958 /* Number of subdescriptors following this one */
959 hdr->subdesc_cnt = subdesc_cnt;
962 /* Offload checksum calculation to HW */
963 if (skb->ip_summed == CHECKSUM_PARTIAL) {
964 if (skb->protocol != htons(ETH_P_IP))
967 hdr->csum_l3 = 1; /* Enable IP csum calculation */
968 hdr->l3_offset = skb_network_offset(skb);
969 hdr->l4_offset = skb_transport_offset(skb);
971 proto = ip_hdr(skb)->protocol;
974 hdr->csum_l4 = SEND_L4_CSUM_TCP;
977 hdr->csum_l4 = SEND_L4_CSUM_UDP;
980 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
986 /* SQ GATHER subdescriptor
987 * Must follow HDR descriptor
989 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 struct sq_gather_subdesc *gather;
994 qentry &= (sq->dmem.q_len - 1);
995 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1000 gather->size = size;
1001 gather->addr = data;
1004 /* Segment a TSO packet into 'gso_size' segments and append
1005 * them to SQ for transfer
1007 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1008 int qentry, struct sk_buff *skb)
1011 int seg_subdescs = 0, desc_cnt = 0;
1012 int seg_len, total_len, data_left;
1013 int hdr_qentry = qentry;
1014 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1016 tso_start(skb, &tso);
1017 total_len = skb->len - hdr_len;
1018 while (total_len > 0) {
1021 /* Save Qentry for adding HDR_SUBDESC at the end */
1022 hdr_qentry = qentry;
1024 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1025 total_len -= data_left;
1027 /* Add segment's header */
1028 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1029 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1030 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1031 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1033 qentry * TSO_HEADER_SIZE);
1034 /* HDR_SUBDESC + GATHER */
1038 /* Add segment's payload fragments */
1039 while (data_left > 0) {
1042 size = min_t(int, tso.size, data_left);
1044 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1045 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1046 virt_to_phys(tso.data));
1051 tso_build_data(skb, &tso, size);
1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1054 seg_subdescs - 1, skb, seg_len);
1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1058 desc_cnt += seg_subdescs;
1060 /* Save SKB in the last segment for freeing */
1061 sq->skbuff[hdr_qentry] = (u64)skb;
1063 /* make sure all memory stores are done before ringing doorbell */
1066 /* Inform HW to xmit all TSO segments */
1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1073 /* Append an skb to a SQ for packet transfer. */
1074 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1079 struct queue_set *qs = nic->qs;
1080 struct snd_queue *sq;
1082 sq_num = skb_get_queue_mapping(skb);
1083 sq = &qs->sq[sq_num];
1085 subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1086 if (subdesc_cnt > atomic_read(&sq->free_cnt))
1089 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1091 /* Check if it's a TSO packet */
1092 if (skb_shinfo(skb)->gso_size)
1093 return nicvf_sq_append_tso(nic, sq, qentry, skb);
1095 /* Add SQ header subdesc */
1096 nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
1098 /* Add SQ gather subdescs */
1099 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1100 size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1101 nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
1103 /* Check for scattered buffer */
1104 if (!skb_is_nonlinear(skb))
1107 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1108 const struct skb_frag_struct *frag;
1110 frag = &skb_shinfo(skb)->frags[i];
1112 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1113 size = skb_frag_size(frag);
1114 nicvf_sq_add_gather_subdesc(sq, qentry, size,
1116 skb_frag_address(frag)));
1120 /* make sure all memory stores are done before ringing doorbell */
1123 /* Inform HW to xmit new packet */
1124 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1125 sq_num, subdesc_cnt);
1129 netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
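/* The CQE packs the per-buffer receive lengths as 16-bit values, four to a
 * 64-bit word (see rb_lens[] in nicvf_get_rcv_skb()). frag_num() below
 * remaps the fragment index before indexing that array; the variant shown
 * here reverses the index within each group of four (0,1,2,3 -> 3,2,1,0),
 * compensating for byte order when the 16-bit fields are read on a
 * big-endian host.
 */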
1133 static inline unsigned frag_num(unsigned i)
1136 return (i & ~3) + 3 - (i & 3);
1142 /* Returns SKB for a received packet */
1143 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1146 int payload_len = 0;
1147 struct sk_buff *skb = NULL;
1148 struct sk_buff *skb_frag = NULL;
1149 struct sk_buff *prev_frag = NULL;
1150 u16 *rb_lens = NULL;
1151 u64 *rb_ptrs = NULL;
1153 rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1154 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
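/* Per the offsets above, the CQE_RX entry carries the 16-bit buffer
 * lengths starting at its 4th 64-bit word and the buffer pointers starting
 * at its 7th 64-bit word.
 */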
1156 netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
1157 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1159 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1160 payload_len = rb_lens[frag_num(frag)];
1162 /* First fragment */
1163 skb = nicvf_rb_ptr_to_skb(nic,
1164 *rb_ptrs - cqe_rx->align_pad,
1168 skb_reserve(skb, cqe_rx->align_pad);
1169 skb_put(skb, payload_len);
1172 skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
1179 if (!skb_shinfo(skb)->frag_list)
1180 skb_shinfo(skb)->frag_list = skb_frag;
1182 prev_frag->next = skb_frag;
1184 prev_frag = skb_frag;
1185 skb->len += payload_len;
1186 skb->data_len += payload_len;
1187 skb_frag->len = payload_len;
1189 /* Next buffer pointer */
1195 /* Enable interrupt */
1196 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1200 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1204 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1207 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1209 case NICVF_INTR_RBDR:
1210 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1212 case NICVF_INTR_PKT_DROP:
1213 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1215 case NICVF_INTR_TCP_TIMER:
1216 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1218 case NICVF_INTR_MBOX:
1219 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1221 case NICVF_INTR_QS_ERR:
1222 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1225 netdev_err(nic->netdev,
1226 "Failed to enable interrupt: unknown type\n");
1230 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
1233 /* Disable interrupt */
1234 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1240 reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1243 reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1245 case NICVF_INTR_RBDR:
1246 reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1248 case NICVF_INTR_PKT_DROP:
1249 reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1251 case NICVF_INTR_TCP_TIMER:
1252 reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1254 case NICVF_INTR_MBOX:
1255 reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1257 case NICVF_INTR_QS_ERR:
1258 reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1261 netdev_err(nic->netdev,
1262 "Failed to disable interrupt: unknown type\n");
1266 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
1269 /* Clear interrupt */
1270 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1276 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1279 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1281 case NICVF_INTR_RBDR:
1282 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1284 case NICVF_INTR_PKT_DROP:
1285 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1287 case NICVF_INTR_TCP_TIMER:
1288 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1290 case NICVF_INTR_MBOX:
1291 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1293 case NICVF_INTR_QS_ERR:
1294 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1297 netdev_err(nic->netdev,
1298 "Failed to clear interrupt: unknown type\n");
1302 nicvf_reg_write(nic, NIC_VF_INT, reg_val);
1305 /* Check if interrupt is enabled */
1306 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1311 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1315 mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1318 mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1320 case NICVF_INTR_RBDR:
1321 mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1323 case NICVF_INTR_PKT_DROP:
1324 mask = NICVF_INTR_PKT_DROP_MASK;
1326 case NICVF_INTR_TCP_TIMER:
1327 mask = NICVF_INTR_TCP_TIMER_MASK;
1329 case NICVF_INTR_MBOX:
1330 mask = NICVF_INTR_MBOX_MASK;
1332 case NICVF_INTR_QS_ERR:
1333 mask = NICVF_INTR_QS_ERR_MASK;
1336 netdev_err(nic->netdev,
1337 "Failed to check interrupt enable: unknown type\n");
1341 return (reg_val & mask);
1344 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1346 struct rcv_queue *rq;
1348 #define GET_RQ_STATS(reg) \
1349 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1350 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
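/* Per-queue statistic registers are addressed as
 * base | (queue index << NIC_Q_NUM_SHIFT) | (stat number << 3),
 * i.e. one 8-byte register per statistic per queue; GET_SQ_STATS() below
 * uses the same scheme for send queues.
 */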
1352 rq = &nic->qs->rq[rq_idx];
1353 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1354 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1357 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1359 struct snd_queue *sq;
1361 #define GET_SQ_STATS(reg) \
1362 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1363 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1365 sq = &nic->qs->sq[sq_idx];
1366 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1367 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1371 /* Check for errors in the receive completion queue entry */
1371 int nicvf_check_cqe_rx_errs(struct nicvf *nic,
1372 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1374 struct cmp_queue_stats *stats = &cq->stats;
1376 if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
1377 stats->rx.errop.good++;
1381 if (netif_msg_rx_err(nic))
1382 netdev_err(nic->netdev,
1383 "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1385 cqe_rx->err_level, cqe_rx->err_opcode);
1387 switch (cqe_rx->err_level) {
1389 stats->rx.errlvl.mac_errs++;
1392 stats->rx.errlvl.l2_errs++;
1395 stats->rx.errlvl.l3_errs++;
1398 stats->rx.errlvl.l4_errs++;
1402 switch (cqe_rx->err_opcode) {
1403 case CQ_RX_ERROP_RE_PARTIAL:
1404 stats->rx.errop.partial_pkts++;
1406 case CQ_RX_ERROP_RE_JABBER:
1407 stats->rx.errop.jabber_errs++;
1409 case CQ_RX_ERROP_RE_FCS:
1410 stats->rx.errop.fcs_errs++;
1412 case CQ_RX_ERROP_RE_TERMINATE:
1413 stats->rx.errop.terminate_errs++;
1415 case CQ_RX_ERROP_RE_RX_CTL:
1416 stats->rx.errop.bgx_rx_errs++;
1418 case CQ_RX_ERROP_PREL2_ERR:
1419 stats->rx.errop.prel2_errs++;
1421 case CQ_RX_ERROP_L2_FRAGMENT:
1422 stats->rx.errop.l2_frags++;
1424 case CQ_RX_ERROP_L2_OVERRUN:
1425 stats->rx.errop.l2_overruns++;
1427 case CQ_RX_ERROP_L2_PFCS:
1428 stats->rx.errop.l2_pfcs++;
1430 case CQ_RX_ERROP_L2_PUNY:
1431 stats->rx.errop.l2_puny++;
1433 case CQ_RX_ERROP_L2_MAL:
1434 stats->rx.errop.l2_hdr_malformed++;
1436 case CQ_RX_ERROP_L2_OVERSIZE:
1437 stats->rx.errop.l2_oversize++;
1439 case CQ_RX_ERROP_L2_UNDERSIZE:
1440 stats->rx.errop.l2_undersize++;
1442 case CQ_RX_ERROP_L2_LENMISM:
1443 stats->rx.errop.l2_len_mismatch++;
1445 case CQ_RX_ERROP_L2_PCLP:
1446 stats->rx.errop.l2_pclp++;
1448 case CQ_RX_ERROP_IP_NOT:
1449 stats->rx.errop.non_ip++;
1451 case CQ_RX_ERROP_IP_CSUM_ERR:
1452 stats->rx.errop.ip_csum_err++;
1454 case CQ_RX_ERROP_IP_MAL:
1455 stats->rx.errop.ip_hdr_malformed++;
1457 case CQ_RX_ERROP_IP_MALD:
1458 stats->rx.errop.ip_payload_malformed++;
1460 case CQ_RX_ERROP_IP_HOP:
1461 stats->rx.errop.ip_hop_errs++;
1463 case CQ_RX_ERROP_L3_ICRC:
1464 stats->rx.errop.l3_icrc_errs++;
1466 case CQ_RX_ERROP_L3_PCLP:
1467 stats->rx.errop.l3_pclp++;
1469 case CQ_RX_ERROP_L4_MAL:
1470 stats->rx.errop.l4_malformed++;
1472 case CQ_RX_ERROP_L4_CHK:
1473 stats->rx.errop.l4_csum_errs++;
1475 case CQ_RX_ERROP_UDP_LEN:
1476 stats->rx.errop.udp_len_err++;
1478 case CQ_RX_ERROP_L4_PORT:
1479 stats->rx.errop.bad_l4_port++;
1481 case CQ_RX_ERROP_TCP_FLAG:
1482 stats->rx.errop.bad_tcp_flag++;
1484 case CQ_RX_ERROP_TCP_OFFSET:
1485 stats->rx.errop.tcp_offset_errs++;
1487 case CQ_RX_ERROP_L4_PCLP:
1488 stats->rx.errop.l4_pclp++;
1490 case CQ_RX_ERROP_RBDR_TRUNC:
1491 stats->rx.errop.pkt_truncated++;
1498 /* Check for errors in the send completion queue entry */
1499 int nicvf_check_cqe_tx_errs(struct nicvf *nic,
1500 struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
1502 struct cmp_queue_stats *stats = &cq->stats;
1504 switch (cqe_tx->send_status) {
1505 case CQ_TX_ERROP_GOOD:
1508 case CQ_TX_ERROP_DESC_FAULT:
1509 stats->tx.desc_fault++;
1511 case CQ_TX_ERROP_HDR_CONS_ERR:
1512 stats->tx.hdr_cons_err++;
1514 case CQ_TX_ERROP_SUBDC_ERR:
1515 stats->tx.subdesc_err++;
1517 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1518 stats->tx.imm_size_oflow++;
1520 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1521 stats->tx.data_seq_err++;
1523 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1524 stats->tx.mem_seq_err++;
1526 case CQ_TX_ERROP_LOCK_VIOL:
1527 stats->tx.lock_viol++;
1529 case CQ_TX_ERROP_DATA_FAULT:
1530 stats->tx.data_fault++;
1532 case CQ_TX_ERROP_TSTMP_CONFLICT:
1533 stats->tx.tstmp_conflict++;
1535 case CQ_TX_ERROP_TSTMP_TIMEOUT:
1536 stats->tx.tstmp_timeout++;
1538 case CQ_TX_ERROP_MEM_FAULT:
1539 stats->tx.mem_fault++;
1541 case CQ_TX_ERROP_CK_OVERLAP:
1542 stats->tx.csum_overlap++;
1544 case CQ_TX_ERROP_CK_OFLOW:
1545 stats->tx.csum_overflow++;