net: thunderx: Cleanup receive buffer allocation
drivers/net/ethernet/cavium/thunder/nicvf_queues.c (linux-2.6-block.git)
1 /*
2  * Copyright (C) 2015 Cavium, Inc.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of version 2 of the GNU General Public License
6  * as published by the Free Software Foundation.
7  */
8
9 #include <linux/pci.h>
10 #include <linux/netdevice.h>
11 #include <linux/ip.h>
12 #include <linux/etherdevice.h>
13 #include <linux/iommu.h>
14 #include <net/ip.h>
15 #include <net/tso.h>
16
17 #include "nic_reg.h"
18 #include "nic.h"
19 #include "q_struct.h"
20 #include "nicvf_queues.h"
21
22 static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
23 {
24         /* Translation is installed only when IOMMU is present */
25         if (nic->iommu_domain)
26                 return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
27         return dma_addr;
28 }
29
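/* Commit the page references accumulated in rb_pageref in one go.
 * Receive buffers carved from the same page only bump the software
 * counter; page_ref_add() is then issued once per page here instead
 * of once per buffer.
 */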
30 static void nicvf_get_page(struct nicvf *nic)
31 {
32         if (!nic->rb_pageref || !nic->rb_page)
33                 return;
34
35         page_ref_add(nic->rb_page, nic->rb_pageref);
36         nic->rb_pageref = 0;
37 }
38
39 /* Poll a register for a specific value */
40 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
41                           u64 reg, int bit_pos, int bits, int val)
42 {
43         u64 bit_mask;
44         u64 reg_val;
45         int timeout = 10;
46
47         bit_mask = (1ULL << bits) - 1;
48         bit_mask = (bit_mask << bit_pos);
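        /* Worked example: bit_pos = 62, bits = 2 (the RBDR FIFO state
         * field polled below in nicvf_reclaim_rbdr) gives
         * bit_mask = 0x3ULL << 62 = 0xC000000000000000.
         */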
49
50         while (timeout) {
51                 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
52                 if (((reg_val & bit_mask) >> bit_pos) == val)
53                         return 0;
54                 usleep_range(1000, 2000);
55                 timeout--;
56         }
57         netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
58         return 1;
59 }
60
61 /* Allocate memory for a queue's descriptors */
62 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
63                                   int q_len, int desc_size, int align_bytes)
64 {
65         dmem->q_len = q_len;
66         dmem->size = (desc_size * q_len) + align_bytes;
67         /* Save address, need it while freeing */
68         dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
69                                                 &dmem->dma, GFP_KERNEL);
70         if (!dmem->unalign_base)
71                 return -ENOMEM;
72
73         /* Align memory address for 'align_bytes' */
74         dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
75         dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
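        /* e.g. with align_bytes = 128, NICVF_ALIGNED_ADDR rounds the DMA
         * address up to the next 128-byte boundary and 'base' is advanced
         * by the same delta, so the CPU and bus views stay in sync.
         */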
76         return 0;
77 }
78
79 /* Free queue's descriptor memory */
80 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
81 {
82         if (!dmem)
83                 return;
84
85         dma_free_coherent(&nic->pdev->dev, dmem->size,
86                           dmem->unalign_base, dmem->dma);
87         dmem->unalign_base = NULL;
88         dmem->base = NULL;
89 }
90
91 /* Allocate a new page or recycle one if possible
92  *
93  * We cannot optimize dma mapping here, since
94  * 1. There is only one RBDR ring for 8 Rx queues.
95  * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed
96  *    and not idx into RBDR ring, so can't refer to saved info.
97  * 3. There are multiple receive buffers per page
98  */
99 static struct pgcache *nicvf_alloc_page(struct nicvf *nic,
100                                         struct rbdr *rbdr, gfp_t gfp)
101 {
102         struct page *page = NULL;
103         struct pgcache *pgcache, *next;
104
105         /* Check if page is already allocated */
106         pgcache = &rbdr->pgcache[rbdr->pgidx];
107         page = pgcache->page;
108         /* Check if page can be recycled */
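        /* A refcount of exactly 1 means only the recycling reference is
         * left, i.e. the stack has dropped all receive buffers carved
         * from this page, so it is safe to hand the page out again;
         * otherwise fall through and allocate a fresh one.
         */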
109         if (page && (page_ref_count(page) != 1))
110                 page = NULL;
111
112         if (!page) {
113                 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
114                 if (!page)
115                         return NULL;
116
117                 this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
118
119                 /* Check for space */
120                 if (rbdr->pgalloc >= rbdr->pgcnt) {
121                         /* Page can still be used */
122                         nic->rb_page = page;
123                         return NULL;
124                 }
125
126                 /* Save the page in page cache */
127                 pgcache->page = page;
128                 rbdr->pgalloc++;
129         }
130
131         /* Take extra page reference for recycling */
132         page_ref_add(page, 1);
133
134         rbdr->pgidx++;
135         rbdr->pgidx &= (rbdr->pgcnt - 1);
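        /* pgcnt was rounded up to a power of two in nicvf_init_rbdr(),
         * so the AND above is a cheap modulo wrap of the cache index.
         */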
136
137         /* Prefetch refcount of next page in page cache */
138         next = &rbdr->pgcache[rbdr->pgidx];
139         page = next->page;
140         if (page)
141                 prefetch(&page->_refcount);
142
143         return pgcache;
144 }
145
146 /* Allocate buffer for packet reception */
147 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
148                                          gfp_t gfp, u32 buf_len, u64 *rbuf)
149 {
150         struct pgcache *pgcache = NULL;
151
152         /* Check if request can be accommodated in the previously allocated page */
153         if (nic->rb_page &&
154             ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
155                 nic->rb_pageref++;
156                 goto ret;
157         }
158
159         nicvf_get_page(nic);
160         nic->rb_page = NULL;
161
162         /* Get new page, either recycled or new one */
163         pgcache = nicvf_alloc_page(nic, rbdr, gfp);
164         if (!pgcache && !nic->rb_page) {
165                 this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
166                 return -ENOMEM;
167         }
168
169         nic->rb_page_offset = 0;
170         /* Check if it's recycled */
171         if (pgcache)
172                 nic->rb_page = pgcache->page;
173 ret:
174         /* HW will ensure data coherency, CPU sync not required */
175         *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
176                                         nic->rb_page_offset, buf_len,
177                                         DMA_FROM_DEVICE,
178                                         DMA_ATTR_SKIP_CPU_SYNC);
179         if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
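                /* Mapping failed: free the page only if no buffer from it
                 * has been posted yet (offset still 0); otherwise earlier
                 * buffers carved from it still hold references to it.
                 */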
180                 if (!nic->rb_page_offset)
181                         __free_pages(nic->rb_page, 0);
182                 nic->rb_page = NULL;
183                 return -ENOMEM;
184         }
185         nic->rb_page_offset += buf_len;
186
187         return 0;
188 }
189
190 /* Build skb around receive buffer */
191 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
192                                            u64 rb_ptr, int len)
193 {
194         void *data;
195         struct sk_buff *skb;
196
197         data = phys_to_virt(rb_ptr);
198
199         /* Now build an skb to give to stack */
200         skb = build_skb(data, RCV_FRAG_LEN);
201         if (!skb) {
202                 put_page(virt_to_page(data));
203                 return NULL;
204         }
205
206         prefetch(skb->data);
207         return skb;
208 }
209
210 /* Allocate RBDR ring and populate receive buffers */
211 static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
212                             int ring_len, int buf_size)
213 {
214         int idx;
215         u64 rbuf;
216         struct rbdr_entry_t *desc;
217         int err;
218
219         err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
220                                      sizeof(struct rbdr_entry_t),
221                                      NICVF_RCV_BUF_ALIGN_BYTES);
222         if (err)
223                 return err;
224
225         rbdr->desc = rbdr->dmem.base;
226         /* Buffer size has to be in multiples of 128 bytes */
227         rbdr->dma_size = buf_size;
228         rbdr->enable = true;
229         rbdr->thresh = RBDR_THRESH;
230         rbdr->head = 0;
231         rbdr->tail = 0;
232
233         /* Initialize page recycling stuff.
234          *
235          * Can't use single buffer per page especially with 64K pages.
236          * On embedded platforms, i.e. 81xx/83xx, available memory itself
237          * is low and minimum ring size of RBDR is 8K, that takes away
238          * lots of memory.
239          */
240         rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
241         rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
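        /* Example sizing (assuming 64 KB pages and 2 KB buffers): an
         * 8K-entry ring packs 32 buffers per page, so 8192 / 32 = 256
         * page-cache slots, already a power of two.
         */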
242         rbdr->pgcache = kzalloc(sizeof(*rbdr->pgcache) *
243                                 rbdr->pgcnt, GFP_KERNEL);
244         if (!rbdr->pgcache)
245                 return -ENOMEM;
246         rbdr->pgidx = 0;
247         rbdr->pgalloc = 0;
248
249         nic->rb_page = NULL;
250         for (idx = 0; idx < ring_len; idx++) {
251                 err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
252                                              RCV_FRAG_LEN, &rbuf);
253                 if (err) {
254                         /* To free already allocated and mapped ones */
255                         rbdr->tail = idx - 1;
256                         return err;
257                 }
258
259                 desc = GET_RBDR_DESC(rbdr, idx);
260                 desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
261         }
262
263         nicvf_get_page(nic);
264
265         return 0;
266 }
267
268 /* Free RBDR ring and its receive buffers */
269 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
270 {
271         int head, tail;
272         u64 buf_addr, phys_addr;
273         struct pgcache *pgcache;
274         struct rbdr_entry_t *desc;
275
276         if (!rbdr)
277                 return;
278
279         rbdr->enable = false;
280         if (!rbdr->dmem.base)
281                 return;
282
283         head = rbdr->head;
284         tail = rbdr->tail;
285
286         /* Release page references */
287         while (head != tail) {
288                 desc = GET_RBDR_DESC(rbdr, head);
289                 buf_addr = desc->buf_addr;
290                 phys_addr = nicvf_iova_to_phys(nic, buf_addr);
291                 dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
292                                      DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
293                 if (phys_addr)
294                         put_page(virt_to_page(phys_to_virt(phys_addr)));
295                 head++;
296                 head &= (rbdr->dmem.q_len - 1);
297         }
298         /* Release buffer of tail desc */
299         desc = GET_RBDR_DESC(rbdr, tail);
300         buf_addr = desc->buf_addr;
301         phys_addr = nicvf_iova_to_phys(nic, buf_addr);
302         dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
303                              DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
304         if (phys_addr)
305                 put_page(virt_to_page(phys_to_virt(phys_addr)));
306
307         /* Sync page cache info */
308         smp_rmb();
309
310         /* Release additional page references held for recycling */
311         head = 0;
312         while (head < rbdr->pgcnt) {
313                 pgcache = &rbdr->pgcache[head];
314                 if (pgcache->page && page_ref_count(pgcache->page) != 0)
315                         put_page(pgcache->page);
316                 head++;
317         }
318
319         /* Free RBDR ring */
320         nicvf_free_q_desc_mem(nic, &rbdr->dmem);
321 }
322
323 /* Refill receive buffer descriptors with new buffers.
324  */
325 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
326 {
327         struct queue_set *qs = nic->qs;
328         int rbdr_idx = qs->rbdr_cnt;
329         int tail, qcount;
330         int refill_rb_cnt;
331         struct rbdr *rbdr;
332         struct rbdr_entry_t *desc;
333         u64 rbuf;
334         int new_rb = 0;
335
336 refill:
337         if (!rbdr_idx)
338                 return;
339         rbdr_idx--;
340         rbdr = &qs->rbdr[rbdr_idx];
341         /* Check if it's enabled */
342         if (!rbdr->enable)
343                 goto next_rbdr;
344
345         /* Get the number of descriptors to be refilled */
346         qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
347         qcount &= 0x7FFFF;
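        /* qcount now holds the number of descriptors the ring still owns
         * (low 19 bits of STATUS0, per the mask above).
         */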
348         /* Doorbell can be rung with at most ring size minus 1 */
349         if (qcount >= (qs->rbdr_len - 1))
350                 goto next_rbdr;
351         else
352                 refill_rb_cnt = qs->rbdr_len - qcount - 1;
353
354         /* Sync page cache info */
355         smp_rmb();
356
357         /* Start filling descs from tail */
358         tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
359         while (refill_rb_cnt) {
360                 tail++;
361                 tail &= (rbdr->dmem.q_len - 1);
362
363                 if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
364                         break;
365
366                 desc = GET_RBDR_DESC(rbdr, tail);
367                 desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
368                 refill_rb_cnt--;
369                 new_rb++;
370         }
371
372         nicvf_get_page(nic);
373
374         /* make sure all memory stores are done before ringing doorbell */
375         smp_wmb();
376
377         /* Check if buffer allocation failed */
378         if (refill_rb_cnt)
379                 nic->rb_alloc_fail = true;
380         else
381                 nic->rb_alloc_fail = false;
382
383         /* Notify HW */
384         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
385                               rbdr_idx, new_rb);
386 next_rbdr:
387         /* Re-enable RBDR interrupts only if buffer allocation was successful */
388         if (!nic->rb_alloc_fail && rbdr->enable &&
389             netif_running(nic->pnicvf->netdev))
390                 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
391
392         if (rbdr_idx)
393                 goto refill;
394 }
395
396 /* Alloc rcv buffers in non-atomic mode for better success */
397 void nicvf_rbdr_work(struct work_struct *work)
398 {
399         struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
400
401         nicvf_refill_rbdr(nic, GFP_KERNEL);
402         if (nic->rb_alloc_fail)
403                 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
404         else
405                 nic->rb_work_scheduled = false;
406 }
407
408 /* In Softirq context, alloc rcv buffers in atomic mode */
409 void nicvf_rbdr_task(unsigned long data)
410 {
411         struct nicvf *nic = (struct nicvf *)data;
412
413         nicvf_refill_rbdr(nic, GFP_ATOMIC);
414         if (nic->rb_alloc_fail) {
415                 nic->rb_work_scheduled = true;
416                 schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
417         }
418 }
419
420 /* Initialize completion queue */
421 static int nicvf_init_cmp_queue(struct nicvf *nic,
422                                 struct cmp_queue *cq, int q_len)
423 {
424         int err;
425
426         err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
427                                      NICVF_CQ_BASE_ALIGN_BYTES);
428         if (err)
429                 return err;
430
431         cq->desc = cq->dmem.base;
432         cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
433         nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
434
435         return 0;
436 }
437
438 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
439 {
440         if (!cq)
441                 return;
442         if (!cq->dmem.base)
443                 return;
444
445         nicvf_free_q_desc_mem(nic, &cq->dmem);
446 }
447
448 /* Initialize transmit queue */
449 static int nicvf_init_snd_queue(struct nicvf *nic,
450                                 struct snd_queue *sq, int q_len)
451 {
452         int err;
453
454         err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
455                                      NICVF_SQ_BASE_ALIGN_BYTES);
456         if (err)
457                 return err;
458
459         sq->desc = sq->dmem.base;
460         sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
461         if (!sq->skbuff)
462                 return -ENOMEM;
463         sq->head = 0;
464         sq->tail = 0;
465         atomic_set(&sq->free_cnt, q_len - 1);
466         sq->thresh = SND_QUEUE_THRESH;
467
468         /* Preallocate memory for TSO segment's header */
469         sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
470                                           q_len * TSO_HEADER_SIZE,
471                                           &sq->tso_hdrs_phys, GFP_KERNEL);
472         if (!sq->tso_hdrs)
473                 return -ENOMEM;
474
475         return 0;
476 }
477
478 void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
479                               int hdr_sqe, u8 subdesc_cnt)
480 {
481         u8 idx;
482         struct sq_gather_subdesc *gather;
483
484         /* Unmap DMA mapped skb data buffers */
485         for (idx = 0; idx < subdesc_cnt; idx++) {
486                 hdr_sqe++;
487                 hdr_sqe &= (sq->dmem.q_len - 1);
488                 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
489                 /* HW will ensure data coherency, CPU sync not required */
490                 dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
491                                      gather->size, DMA_TO_DEVICE,
492                                      DMA_ATTR_SKIP_CPU_SYNC);
493         }
494 }
495
496 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
497 {
498         struct sk_buff *skb;
499         struct sq_hdr_subdesc *hdr;
500         struct sq_hdr_subdesc *tso_sqe;
501
502         if (!sq)
503                 return;
504         if (!sq->dmem.base)
505                 return;
506
507         if (sq->tso_hdrs)
508                 dma_free_coherent(&nic->pdev->dev,
509                                   sq->dmem.q_len * TSO_HEADER_SIZE,
510                                   sq->tso_hdrs, sq->tso_hdrs_phys);
511
512         /* Free pending skbs in the queue */
513         smp_rmb();
514         while (sq->head != sq->tail) {
515                 skb = (struct sk_buff *)sq->skbuff[sq->head];
516                 if (!skb)
517                         goto next;
518                 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
519                 /* Check for dummy descriptor used for HW TSO offload on 88xx */
520                 if (hdr->dont_send) {
521                         /* Get actual TSO descriptors and unmap them */
522                         tso_sqe =
523                          (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
524                         nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
525                                                  tso_sqe->subdesc_cnt);
526                 } else {
527                         nicvf_unmap_sndq_buffers(nic, sq, sq->head,
528                                                  hdr->subdesc_cnt);
529                 }
530                 dev_kfree_skb_any(skb);
531 next:
532                 sq->head++;
533                 sq->head &= (sq->dmem.q_len - 1);
534         }
535         kfree(sq->skbuff);
536         nicvf_free_q_desc_mem(nic, &sq->dmem);
537 }
538
539 static void nicvf_reclaim_snd_queue(struct nicvf *nic,
540                                     struct queue_set *qs, int qidx)
541 {
542         /* Disable send queue */
543         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
544         /* Check if SQ is stopped */
545         if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
546                 return;
547         /* Reset send queue */
548         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
549 }
550
551 static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
552                                     struct queue_set *qs, int qidx)
553 {
554         union nic_mbx mbx = {};
555
556         /* Make sure all packets in the pipeline are written back into mem */
557         mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
558         nicvf_send_msg_to_pf(nic, &mbx);
559 }
560
561 static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
562                                     struct queue_set *qs, int qidx)
563 {
564         /* Disable timer threshold (doesn't get reset upon CQ reset) */
565         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
566         /* Disable completion queue */
567         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
568         /* Reset completion queue */
569         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
570 }
571
572 static void nicvf_reclaim_rbdr(struct nicvf *nic,
573                                struct rbdr *rbdr, int qidx)
574 {
575         u64 tmp, fifo_state;
576         int timeout = 10;
577
578         /* Save head and tail pointers for freeing up buffers */
579         rbdr->head = nicvf_queue_reg_read(nic,
580                                           NIC_QSET_RBDR_0_1_HEAD,
581                                           qidx) >> 3;
582         rbdr->tail = nicvf_queue_reg_read(nic,
583                                           NIC_QSET_RBDR_0_1_TAIL,
584                                           qidx) >> 3;
585
586         /* If RBDR FIFO is in 'FAIL' state then do a reset first
587          * before reclaiming.
588          */
589         fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
590         if (((fifo_state >> 62) & 0x03) == 0x3)
591                 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
592                                       qidx, NICVF_RBDR_RESET);
593
594         /* Disable RBDR */
595         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
596         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
597                 return;
598         while (1) {
599                 tmp = nicvf_queue_reg_read(nic,
600                                            NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
601                                            qidx);
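                /* The two 32-bit halves of PREFETCH_STATUS are compared;
                 * the loop treats a match as the prefetch FIFO having
                 * drained, at which point the RBDR can be safely reset.
                 */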
602                 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
603                         break;
604                 usleep_range(1000, 2000);
605                 timeout--;
606                 if (!timeout) {
607                         netdev_err(nic->netdev,
608                                    "Failed polling on prefetch status\n");
609                         return;
610                 }
611         }
612         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
613                               qidx, NICVF_RBDR_RESET);
614
615         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
616                 return;
617         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
618         if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
619                 return;
620 }
621
622 void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
623 {
624         u64 rq_cfg;
625         int sqs;
626
627         rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
628
629         /* Enable first VLAN stripping */
630         if (features & NETIF_F_HW_VLAN_CTAG_RX)
631                 rq_cfg |= (1ULL << 25);
632         else
633                 rq_cfg &= ~(1ULL << 25);
634         nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
635
636         /* Configure Secondary Qsets, if any */
637         for (sqs = 0; sqs < nic->sqs_count; sqs++)
638                 if (nic->snicvf[sqs])
639                         nicvf_queue_reg_write(nic->snicvf[sqs],
640                                               NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
641 }
642
643 static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
644 {
645         union nic_mbx mbx = {};
646
647         /* Reset all RQ/SQ and VF stats */
648         mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
649         mbx.reset_stat.rx_stat_mask = 0x3FFF;
650         mbx.reset_stat.tx_stat_mask = 0x1F;
651         mbx.reset_stat.rq_stat_mask = 0xFFFF;
652         mbx.reset_stat.sq_stat_mask = 0xFFFF;
653         nicvf_send_msg_to_pf(nic, &mbx);
654 }
655
656 /* Configures receive queue */
657 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
658                                    int qidx, bool enable)
659 {
660         union nic_mbx mbx = {};
661         struct rcv_queue *rq;
662         struct rq_cfg rq_cfg;
663
664         rq = &qs->rq[qidx];
665         rq->enable = enable;
666
667         /* Disable receive queue */
668         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
669
670         if (!rq->enable) {
671                 nicvf_reclaim_rcv_queue(nic, qs, qidx);
672                 return;
673         }
674
675         rq->cq_qs = qs->vnic_id;
676         rq->cq_idx = qidx;
677         rq->start_rbdr_qs = qs->vnic_id;
678         rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
679         rq->cont_rbdr_qs = qs->vnic_id;
680         rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
681         /* All writes of RBDR data to be loaded into L2 cache as well */
682         rq->caching = 1;
683
684         /* Send a mailbox msg to PF to config RQ */
685         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
686         mbx.rq.qs_num = qs->vnic_id;
687         mbx.rq.rq_num = qidx;
688         mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
689                           (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
690                           (rq->cont_qs_rbdr_idx << 8) |
691                           (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
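        /* Bit offsets used above: caching at 26, cq_qs at 19, cq_idx at 16,
         * cont_rbdr_qs at 9, cont_qs_rbdr_idx at 8, start_rbdr_qs at 1 and
         * start_qs_rbdr_idx at 0.
         */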
692         nicvf_send_msg_to_pf(nic, &mbx);
693
694         mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
695         mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
696                      (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
697                      (qs->vnic_id << 0);
698         nicvf_send_msg_to_pf(nic, &mbx);
699
700         /* RQ drop config
701          * Enable CQ drop to reserve sufficient CQEs for all tx packets
702          */
703         mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
704         mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
705                      (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
706                      (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
707         nicvf_send_msg_to_pf(nic, &mbx);
708
709         if (!nic->sqs_mode && (qidx == 0)) {
710                 /* Enable checking L3/L4 length and TCP/UDP checksums
711                  * Also allow IPv6 pkts with zero UDP checksum.
712                  */
713                 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
714                                       (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
715                 nicvf_config_vlan_stripping(nic, nic->netdev->features);
716         }
717
718         /* Enable Receive queue */
719         memset(&rq_cfg, 0, sizeof(struct rq_cfg));
720         rq_cfg.ena = 1;
721         rq_cfg.tcp_ena = 0;
722         nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
723 }
724
725 /* Configures completion queue */
726 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
727                             int qidx, bool enable)
728 {
729         struct cmp_queue *cq;
730         struct cq_cfg cq_cfg;
731
732         cq = &qs->cq[qidx];
733         cq->enable = enable;
734
735         if (!cq->enable) {
736                 nicvf_reclaim_cmp_queue(nic, qs, qidx);
737                 return;
738         }
739
740         /* Reset completion queue */
741         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
742
743         if (!cq->enable)
744                 return;
745
746         spin_lock_init(&cq->lock);
747         /* Set completion queue base address */
748         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
749                               qidx, (u64)(cq->dmem.phys_base));
750
751         /* Enable Completion queue */
752         memset(&cq_cfg, 0, sizeof(struct cq_cfg));
753         cq_cfg.ena = 1;
754         cq_cfg.reset = 0;
755         cq_cfg.caching = 0;
756         cq_cfg.qsize = ilog2(qs->cq_len >> 10);
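        /* qsize encodes the ring length as log2(len / 1024),
         * e.g. a 4096-entry CQ yields qsize = ilog2(4) = 2.
         */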
757         cq_cfg.avg_con = 0;
758         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
759
760         /* Set threshold value for interrupt generation */
761         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
762         nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
763                               qidx, CMP_QUEUE_TIMER_THRESH);
764 }
765
766 /* Configures transmit queue */
767 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
768                                    int qidx, bool enable)
769 {
770         union nic_mbx mbx = {};
771         struct snd_queue *sq;
772         struct sq_cfg sq_cfg;
773
774         sq = &qs->sq[qidx];
775         sq->enable = enable;
776
777         if (!sq->enable) {
778                 nicvf_reclaim_snd_queue(nic, qs, qidx);
779                 return;
780         }
781
782         /* Reset send queue */
783         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
784
785         sq->cq_qs = qs->vnic_id;
786         sq->cq_idx = qidx;
787
788         /* Send a mailbox msg to PF to config SQ */
789         mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
790         mbx.sq.qs_num = qs->vnic_id;
791         mbx.sq.sq_num = qidx;
792         mbx.sq.sqs_mode = nic->sqs_mode;
793         mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
794         nicvf_send_msg_to_pf(nic, &mbx);
795
796         /* Set queue base address */
797         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
798                               qidx, (u64)(sq->dmem.phys_base));
799
800         /* Enable send queue  & set queue size */
801         memset(&sq_cfg, 0, sizeof(struct sq_cfg));
802         sq_cfg.ena = 1;
803         sq_cfg.reset = 0;
804         sq_cfg.ldwb = 0;
805         sq_cfg.qsize = ilog2(qs->sq_len >> 10);
806         sq_cfg.tstmp_bgx_intf = 0;
807         /* CQ's level at which HW will stop processing SQEs to avoid
808          * transmitting a pkt with no space in CQ to post CQE_TX.
809          */
810         sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
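        /* cq_limit is programmed in 1/256ths of the CQ ring, so this
         * reserves roughly CMP_QUEUE_PIPELINE_RSVD CQEs of headroom
         * (inferred from the formula above).
         */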
811         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
812
813         /* Set threshold value for interrupt generation */
814         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
815
816         /* Set queue:cpu affinity for better load distribution */
817         if (cpu_online(qidx)) {
818                 cpumask_set_cpu(qidx, &sq->affinity_mask);
819                 netif_set_xps_queue(nic->netdev,
820                                     &sq->affinity_mask, qidx);
821         }
822 }
823
824 /* Configures receive buffer descriptor ring */
825 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
826                               int qidx, bool enable)
827 {
828         struct rbdr *rbdr;
829         struct rbdr_cfg rbdr_cfg;
830
831         rbdr = &qs->rbdr[qidx];
832         nicvf_reclaim_rbdr(nic, rbdr, qidx);
833         if (!enable)
834                 return;
835
836         /* Set descriptor base address */
837         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
838                               qidx, (u64)(rbdr->dmem.phys_base));
839
840         /* Enable RBDR  & set queue size */
841         /* Buffer size should be in multiples of 128 bytes */
842         memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
843         rbdr_cfg.ena = 1;
844         rbdr_cfg.reset = 0;
845         rbdr_cfg.ldwb = 0;
846         rbdr_cfg.qsize = RBDR_SIZE;
847         rbdr_cfg.avg_con = 0;
848         rbdr_cfg.lines = rbdr->dma_size / 128;
849         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
850                               qidx, *(u64 *)&rbdr_cfg);
851
852         /* Notify HW */
853         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
854                               qidx, qs->rbdr_len - 1);
855
856         /* Set threshold value for interrupt generation */
857         nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
858                               qidx, rbdr->thresh - 1);
859 }
860
861 /* Requests PF to assign and enable Qset */
862 void nicvf_qset_config(struct nicvf *nic, bool enable)
863 {
864         union nic_mbx mbx = {};
865         struct queue_set *qs = nic->qs;
866         struct qs_cfg *qs_cfg;
867
868         if (!qs) {
869                 netdev_warn(nic->netdev,
870                             "Qset is still not allocated, don't init queues\n");
871                 return;
872         }
873
874         qs->enable = enable;
875         qs->vnic_id = nic->vf_id;
876
877         /* Send a mailbox msg to PF to config Qset */
878         mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
879         mbx.qs.num = qs->vnic_id;
880         mbx.qs.sqs_count = nic->sqs_count;
881
882         mbx.qs.cfg = 0;
883         qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
884         if (qs->enable) {
885                 qs_cfg->ena = 1;
886 #ifdef __BIG_ENDIAN
887                 qs_cfg->be = 1;
888 #endif
889                 qs_cfg->vnic = qs->vnic_id;
890         }
891         nicvf_send_msg_to_pf(nic, &mbx);
892 }
893
894 static void nicvf_free_resources(struct nicvf *nic)
895 {
896         int qidx;
897         struct queue_set *qs = nic->qs;
898
899         /* Free receive buffer descriptor ring */
900         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
901                 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
902
903         /* Free completion queue */
904         for (qidx = 0; qidx < qs->cq_cnt; qidx++)
905                 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
906
907         /* Free send queue */
908         for (qidx = 0; qidx < qs->sq_cnt; qidx++)
909                 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
910 }
911
912 static int nicvf_alloc_resources(struct nicvf *nic)
913 {
914         int qidx;
915         struct queue_set *qs = nic->qs;
916
917         /* Alloc receive buffer descriptor ring */
918         for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
919                 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
920                                     DMA_BUFFER_LEN))
921                         goto alloc_fail;
922         }
923
924         /* Alloc send queue */
925         for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
926                 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
927                         goto alloc_fail;
928         }
929
930         /* Alloc completion queue */
931         for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
932                 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
933                         goto alloc_fail;
934         }
935
936         return 0;
937 alloc_fail:
938         nicvf_free_resources(nic);
939         return -ENOMEM;
940 }
941
942 int nicvf_set_qset_resources(struct nicvf *nic)
943 {
944         struct queue_set *qs;
945
946         qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
947         if (!qs)
948                 return -ENOMEM;
949         nic->qs = qs;
950
951         /* Set count of each queue */
952         qs->rbdr_cnt = DEFAULT_RBDR_CNT;
953         qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
954         qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
955         qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
956
957         /* Set queue lengths */
958         qs->rbdr_len = RCV_BUF_COUNT;
959         qs->sq_len = SND_QUEUE_LEN;
960         qs->cq_len = CMP_QUEUE_LEN;
961
962         nic->rx_queues = qs->rq_cnt;
963         nic->tx_queues = qs->sq_cnt;
964
965         return 0;
966 }
967
968 int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
969 {
970         bool disable = false;
971         struct queue_set *qs = nic->qs;
972         struct queue_set *pqs = nic->pnicvf->qs;
973         int qidx;
974
975         if (!qs)
976                 return 0;
977
978         /* Take primary VF's queue lengths.
979          * This is needed to take queue lengths set from ethtool
980          * into consideration.
981          */
982         if (nic->sqs_mode && pqs) {
983                 qs->cq_len = pqs->cq_len;
984                 qs->sq_len = pqs->sq_len;
985         }
986
987         if (enable) {
988                 if (nicvf_alloc_resources(nic))
989                         return -ENOMEM;
990
991                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
992                         nicvf_snd_queue_config(nic, qs, qidx, enable);
993                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
994                         nicvf_cmp_queue_config(nic, qs, qidx, enable);
995                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
996                         nicvf_rbdr_config(nic, qs, qidx, enable);
997                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
998                         nicvf_rcv_queue_config(nic, qs, qidx, enable);
999         } else {
1000                 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1001                         nicvf_rcv_queue_config(nic, qs, qidx, disable);
1002                 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1003                         nicvf_rbdr_config(nic, qs, qidx, disable);
1004                 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1005                         nicvf_snd_queue_config(nic, qs, qidx, disable);
1006                 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1007                         nicvf_cmp_queue_config(nic, qs, qidx, disable);
1008
1009                 nicvf_free_resources(nic);
1010         }
1011
1012         /* Reset RXQ's stats.
1013          * SQ's stats will get reset automatically once SQ is reset.
1014          */
1015         nicvf_reset_rcv_queue_stats(nic);
1016
1017         return 0;
1018 }
1019
1020 /* Get a free desc from SQ
1021  * returns descriptor pointer & descriptor number
1022  */
1023 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1024 {
1025         int qentry;
1026
1027         qentry = sq->tail;
1028         atomic_sub(desc_cnt, &sq->free_cnt);
1029         sq->tail += desc_cnt;
1030         sq->tail &= (sq->dmem.q_len - 1);
1031
1032         return qentry;
1033 }
1034
1035 /* Rollback to previous tail pointer when descriptors not used */
1036 static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
1037                                           int qentry, int desc_cnt)
1038 {
1039         sq->tail = qentry;
1040         atomic_add(desc_cnt, &sq->free_cnt);
1041 }
1042
1043 /* Free descriptor back to SQ for future use */
1044 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1045 {
1046         atomic_add(desc_cnt, &sq->free_cnt);
1047         sq->head += desc_cnt;
1048         sq->head &= (sq->dmem.q_len - 1);
1049 }
1050
1051 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1052 {
1053         qentry++;
1054         qentry &= (sq->dmem.q_len - 1);
1055         return qentry;
1056 }
1057
1058 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1059 {
1060         u64 sq_cfg;
1061
1062         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1063         sq_cfg |= NICVF_SQ_EN;
1064         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1065         /* Ring doorbell so that H/W restarts processing SQEs */
1066         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1067 }
1068
1069 void nicvf_sq_disable(struct nicvf *nic, int qidx)
1070 {
1071         u64 sq_cfg;
1072
1073         sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1074         sq_cfg &= ~NICVF_SQ_EN;
1075         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1076 }
1077
1078 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
1079                               int qidx)
1080 {
1081         u64 head, tail;
1082         struct sk_buff *skb;
1083         struct nicvf *nic = netdev_priv(netdev);
1084         struct sq_hdr_subdesc *hdr;
1085
1086         head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1087         tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
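        /* The >> 4 converts the register value to a descriptor index,
         * each SQ subdescriptor being 16 bytes (SND_QUEUE_DESC_SIZE).
         */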
1088         while (sq->head != head) {
1089                 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1090                 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1091                         nicvf_put_sq_desc(sq, 1);
1092                         continue;
1093                 }
1094                 skb = (struct sk_buff *)sq->skbuff[sq->head];
1095                 if (skb)
1096                         dev_kfree_skb_any(skb);
1097                 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
1098                 atomic64_add(hdr->tot_len,
1099                              (atomic64_t *)&netdev->stats.tx_bytes);
1100                 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1101         }
1102 }
1103
1104 /* Calculate the number of SQ subdescriptors needed to transmit all
1105  * segments of this TSO packet.
1106  * Taken from 'Tilera network driver' with a minor modification.
1107  */
1108 static int nicvf_tso_count_subdescs(struct sk_buff *skb)
1109 {
1110         struct skb_shared_info *sh = skb_shinfo(skb);
1111         unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1112         unsigned int data_len = skb->len - sh_len;
1113         unsigned int p_len = sh->gso_size;
1114         long f_id = -1;    /* id of the current fragment */
1115         long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
1116         long f_used = 0;  /* bytes used from the current fragment */
1117         long n;            /* size of the current piece of payload */
1118         int num_edescs = 0;
1119         int segment;
1120
1121         for (segment = 0; segment < sh->gso_segs; segment++) {
1122                 unsigned int p_used = 0;
1123
1124                 /* One edesc for header and for each piece of the payload. */
1125                 for (num_edescs++; p_used < p_len; num_edescs++) {
1126                         /* Advance as needed. */
1127                         while (f_used >= f_size) {
1128                                 f_id++;
1129                                 f_size = skb_frag_size(&sh->frags[f_id]);
1130                                 f_used = 0;
1131                         }
1132
1133                         /* Use bytes from the current fragment. */
1134                         n = p_len - p_used;
1135                         if (n > f_size - f_used)
1136                                 n = f_size - f_used;
1137                         f_used += n;
1138                         p_used += n;
1139                 }
1140
1141                 /* The last segment may be less than gso_size. */
1142                 data_len -= p_len;
1143                 if (data_len < p_len)
1144                         p_len = data_len;
1145         }
1146
1147         /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
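        /* e.g. a fully linear skb split into 3 segments, each payload
         * fitting in one piece: 3 * (1 header piece + 1 payload piece) = 6,
         * plus 3 HDR subdescs = 9 subdescriptors in total.
         */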
1148         return num_edescs + sh->gso_segs;
1149 }
1150
1151 #define POST_CQE_DESC_COUNT 2
1152
1153 /* Get the number of SQ descriptors needed to xmit this skb */
1154 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
1155 {
1156         int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
1157
1158         if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
1159                 subdesc_cnt = nicvf_tso_count_subdescs(skb);
1160                 return subdesc_cnt;
1161         }
1162
1163         /* Dummy descriptors to get TSO pkt completion notification */
1164         if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
1165                 subdesc_cnt += POST_CQE_DESC_COUNT;
1166
1167         if (skb_shinfo(skb)->nr_frags)
1168                 subdesc_cnt += skb_shinfo(skb)->nr_frags;
1169
1170         return subdesc_cnt;
1171 }
1172
1173 /* Add SQ HEADER subdescriptor.
1174  * First subdescriptor for every send descriptor.
1175  */
1176 static inline void
1177 nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1178                          int subdesc_cnt, struct sk_buff *skb, int len)
1179 {
1180         int proto;
1181         struct sq_hdr_subdesc *hdr;
1182         union {
1183                 struct iphdr *v4;
1184                 struct ipv6hdr *v6;
1185                 unsigned char *hdr;
1186         } ip;
1187
1188         ip.hdr = skb_network_header(skb);
1189         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1190         memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1191         hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1192
1193         if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
1194                 /* post_cqe = 0, to avoid HW posting a CQE for every TSO
1195                  * segment transmitted on 88xx.
1196                  */
1197                 hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
1198         } else {
1199                 sq->skbuff[qentry] = (u64)skb;
1200                 /* Enable notification via CQE after processing SQE */
1201                 hdr->post_cqe = 1;
1202                 /* No of subdescriptors following this */
1203                 hdr->subdesc_cnt = subdesc_cnt;
1204         }
1205         hdr->tot_len = len;
1206
1207         /* Offload checksum calculation to HW */
1208         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1209                 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1210                 hdr->l3_offset = skb_network_offset(skb);
1211                 hdr->l4_offset = skb_transport_offset(skb);
1212
1213                 proto = (ip.v4->version == 4) ? ip.v4->protocol :
1214                         ip.v6->nexthdr;
1215
1216                 switch (proto) {
1217                 case IPPROTO_TCP:
1218                         hdr->csum_l4 = SEND_L4_CSUM_TCP;
1219                         break;
1220                 case IPPROTO_UDP:
1221                         hdr->csum_l4 = SEND_L4_CSUM_UDP;
1222                         break;
1223                 case IPPROTO_SCTP:
1224                         hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1225                         break;
1226                 }
1227         }
1228
1229         if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
1230                 hdr->tso = 1;
1231                 hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
1232                 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1233                 /* For non-tunneled pkts, point this to L2 ethertype */
1234                 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
1235                 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1236         }
1237 }
1238
1239 /* SQ GATHER subdescriptor
1240  * Must follow HDR descriptor
1241  */
1242 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1243                                                int size, u64 data)
1244 {
1245         struct sq_gather_subdesc *gather;
1246
1247         qentry &= (sq->dmem.q_len - 1);
1248         gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1249
1250         memset(gather, 0, SND_QUEUE_DESC_SIZE);
1251         gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1252         gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1253         gather->size = size;
1254         gather->addr = data;
1255 }
1256
1257 /* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
1258  * packet so that a CQE is posted as a notification for transmission of
1259  * TSO packet.
1260  */
1261 static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
1262                                             int tso_sqe, struct sk_buff *skb)
1263 {
1264         struct sq_imm_subdesc *imm;
1265         struct sq_hdr_subdesc *hdr;
1266
1267         sq->skbuff[qentry] = (u64)skb;
1268
1269         hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1270         memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1271         hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1272         /* Enable notification via CQE after processing SQE */
1273         hdr->post_cqe = 1;
1274         /* There is no packet to transmit here */
1275         hdr->dont_send = 1;
1276         hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
1277         hdr->tot_len = 1;
1278         /* Actual TSO header SQE index, needed for cleanup */
1279         hdr->rsvd2 = tso_sqe;
1280
1281         qentry = nicvf_get_nxt_sqentry(sq, qentry);
1282         imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
1283         memset(imm, 0, SND_QUEUE_DESC_SIZE);
1284         imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
1285         imm->len = 1;
1286 }
1287
1288 static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
1289                                      int sq_num, int desc_cnt)
1290 {
1291         struct netdev_queue *txq;
1292
1293         txq = netdev_get_tx_queue(nic->pnicvf->netdev,
1294                                   skb_get_queue_mapping(skb));
1295
1296         netdev_tx_sent_queue(txq, skb->len);
1297
1298         /* make sure all memory stores are done before ringing doorbell */
1299         smp_wmb();
1300
1301         /* Inform HW to xmit all TSO segments */
1302         nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1303                               sq_num, desc_cnt);
1304 }
1305
1306 /* Segment a TSO packet into 'gso_size' segments and append
1307  * them to SQ for transfer
1308  */
1309 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1310                                int sq_num, int qentry, struct sk_buff *skb)
1311 {
1312         struct tso_t tso;
1313         int seg_subdescs = 0, desc_cnt = 0;
1314         int seg_len, total_len, data_left;
1315         int hdr_qentry = qentry;
1316         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1317
1318         tso_start(skb, &tso);
1319         total_len = skb->len - hdr_len;
1320         while (total_len > 0) {
1321                 char *hdr;
1322
1323                 /* Save Qentry for adding HDR_SUBDESC at the end */
1324                 hdr_qentry = qentry;
1325
1326                 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1327                 total_len -= data_left;
1328
1329                 /* Add segment's header */
1330                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1331                 hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1332                 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1333                 nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1334                                             sq->tso_hdrs_phys +
1335                                             qentry * TSO_HEADER_SIZE);
1336                 /* HDR_SUBDESC + GATHER */
1337                 seg_subdescs = 2;
1338                 seg_len = hdr_len;
1339
1340                 /* Add segment's payload fragments */
1341                 while (data_left > 0) {
1342                         int size;
1343
1344                         size = min_t(int, tso.size, data_left);
1345
1346                         qentry = nicvf_get_nxt_sqentry(sq, qentry);
1347                         nicvf_sq_add_gather_subdesc(sq, qentry, size,
1348                                                     virt_to_phys(tso.data));
1349                         seg_subdescs++;
1350                         seg_len += size;
1351
1352                         data_left -= size;
1353                         tso_build_data(skb, &tso, size);
1354                 }
1355                 nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
1356                                          seg_subdescs - 1, skb, seg_len);
1357                 sq->skbuff[hdr_qentry] = (u64)NULL;
1358                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1359
1360                 desc_cnt += seg_subdescs;
1361         }
1362         /* Save SKB in the last segment for freeing */
1363         sq->skbuff[hdr_qentry] = (u64)skb;
1364
1365         nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
1366
1367         this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1368         return 1;
1369 }
1370
1371 /* Append an skb to a SQ for packet transfer. */
1372 int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
1373                         struct sk_buff *skb, u8 sq_num)
1374 {
1375         int i, size;
1376         int subdesc_cnt, hdr_sqe = 0;
1377         int qentry;
1378         u64 dma_addr;
1379
1380         subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1381         if (subdesc_cnt > atomic_read(&sq->free_cnt))
1382                 goto append_fail;
1383
1384         qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1385
1386         /* Check if it's a TSO packet */
1387         if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
1388                 return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
1389
1390         /* Add SQ header subdesc */
1391         nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
1392                                  skb, skb->len);
1393         hdr_sqe = qentry;
1394
1395         /* Add SQ gather subdescs */
1396         qentry = nicvf_get_nxt_sqentry(sq, qentry);
1397         size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1398         /* HW will ensure data coherency, CPU sync not required */
1399         dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
1400                                       offset_in_page(skb->data), size,
1401                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1402         if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1403                 nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1404                 return 0;
1405         }
1406
1407         nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
1408
1409         /* Check for scattered buffer */
1410         if (!skb_is_nonlinear(skb))
1411                 goto doorbell;
1412
1413         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1414                 const struct skb_frag_struct *frag;
1415
1416                 frag = &skb_shinfo(skb)->frags[i];
1417
1418                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1419                 size = skb_frag_size(frag);
1420                 dma_addr = dma_map_page_attrs(&nic->pdev->dev,
1421                                               skb_frag_page(frag),
1422                                               frag->page_offset, size,
1423                                               DMA_TO_DEVICE,
1424                                               DMA_ATTR_SKIP_CPU_SYNC);
1425                 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1426                         /* Free entire chain of mapped buffers
1427                          * here 'i' = frags mapped + above mapped skb->data
1428                          */
1429                         nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
1430                         nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
1431                         return 0;
1432                 }
1433                 nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
1434         }
1435
1436 doorbell:
1437         if (nic->t88 && skb_shinfo(skb)->gso_size) {
1438                 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1439                 nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
1440         }
1441
1442         nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
1443
1444         return 1;
1445
1446 append_fail:
1447         /* Use original PCI dev for debug log */
1448         nic = nic->pnicvf;
1449         netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1450         return 0;
1451 }
1452
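/* rb_lens[] entries are u16s packed into 64-bit CQE words; on big-endian
 * the four u16s within each word appear reversed, so frag_num() mirrors
 * the index within each group of four.
 */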
1453 static inline unsigned frag_num(unsigned i)
1454 {
1455 #ifdef __BIG_ENDIAN
1456         return (i & ~3) + 3 - (i & 3);
1457 #else
1458         return i;
1459 #endif
1460 }
1461
1462 /* Returns SKB for a received packet */
1463 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1464 {
1465         int frag;
1466         int payload_len = 0;
1467         struct sk_buff *skb = NULL;
1468         struct page *page;
1469         int offset;
1470         u16 *rb_lens = NULL;
1471         u64 *rb_ptrs = NULL;
1472         u64 phys_addr;
1473
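        /* Per-fragment receive buffer lengths start at word 3 of the CQE */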
1474         rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1475         /* On all chips except 88xx pass1, CQE_RX2_S is added to
1476          * CQE_RX at word6, hence the buffer pointers move by one word.
1477          *
1478          * Use the existing 'hw_tso' flag, which is set for all chips
1479          * except 88xx pass1, instead of an additional cache line
1480          * access (or miss) to read the pci dev's revision.
1481          */
1482         if (!nic->hw_tso)
1483                 rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1484         else
1485                 rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
1486
1487         for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1488                 payload_len = rb_lens[frag_num(frag)];
1489                 phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
1490                 if (!phys_addr) {
1491                         if (skb)
1492                                 dev_kfree_skb_any(skb);
1493                         return NULL;
1494                 }
1495
1496                 if (!frag) {
1497                         /* First fragment */
1498                         dma_unmap_page_attrs(&nic->pdev->dev,
1499                                              *rb_ptrs - cqe_rx->align_pad,
1500                                              RCV_FRAG_LEN, DMA_FROM_DEVICE,
1501                                              DMA_ATTR_SKIP_CPU_SYNC);
1502                         skb = nicvf_rb_ptr_to_skb(nic,
1503                                                   phys_addr - cqe_rx->align_pad,
1504                                                   payload_len);
1505                         if (!skb)
1506                                 return NULL;
1507                         skb_reserve(skb, cqe_rx->align_pad);
1508                         skb_put(skb, payload_len);
1509                 } else {
1510                         /* Add fragments */
1511                         dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
1512                                              RCV_FRAG_LEN, DMA_FROM_DEVICE,
1513                                              DMA_ATTR_SKIP_CPU_SYNC);
1514                         page = virt_to_page(phys_to_virt(phys_addr));
1515                         offset = phys_to_virt(phys_addr) - page_address(page);
1516                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1517                                         offset, payload_len, RCV_FRAG_LEN);
1518                 }
1519                 /* Next buffer pointer */
1520                 rb_ptrs++;
1521         }
1522         return skb;
1523 }
1524
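/* Translate an interrupt type (and queue index, where applicable) into its
 * bit mask in the VF interrupt registers; returns 0 for an unknown type.
 */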
1525 static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
1526 {
1527         u64 reg_val;
1528
1529         switch (int_type) {
1530         case NICVF_INTR_CQ:
1531                 reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1532                 break;
1533         case NICVF_INTR_SQ:
1534                 reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1535                 break;
1536         case NICVF_INTR_RBDR:
1537                 reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1538                 break;
1539         case NICVF_INTR_PKT_DROP:
1540                 reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1541                 break;
1542         case NICVF_INTR_TCP_TIMER:
1543                 reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1544                 break;
1545         case NICVF_INTR_MBOX:
1546                 reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1547                 break;
1548         case NICVF_INTR_QS_ERR:
1549                 reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1550                 break;
1551         default:
1552                 reg_val = 0;
1553         }
1554
1555         return reg_val;
1556 }
1557
1558 /* Enable interrupt */
1559 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1560 {
1561         u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1562
1563         if (!mask) {
1564                 netdev_dbg(nic->netdev,
1565                            "Failed to enable interrupt: unknown type\n");
1566                 return;
1567         }
1568         nicvf_reg_write(nic, NIC_VF_ENA_W1S,
1569                         nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
1570 }
1571
1572 /* Disable interrupt */
1573 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1574 {
1575         u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1576
1577         if (!mask) {
1578                 netdev_dbg(nic->netdev,
1579                            "Failed to disable interrupt: unknown type\n");
1580                 return;
1581         }
1582
1583         nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
1584 }
1585
1586 /* Clear interrupt */
1587 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1588 {
1589         u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1590
1591         if (!mask) {
1592                 netdev_dbg(nic->netdev,
1593                            "Failed to clear interrupt: unknown type\n");
1594                 return;
1595         }
1596
1597         nicvf_reg_write(nic, NIC_VF_INT, mask);
1598 }
1599
1600 /* Check if interrupt is enabled */
1601 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1602 {
1603         u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
1604         /* If the interrupt type is unknown, treat it as disabled. */
1605         if (!mask) {
1606                 netdev_dbg(nic->netdev,
1607                            "Failed to check interrupt enable: unknown type\n");
1608                 return 0;
1609         }
1610
1611         return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1612 }
1613
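/* Refresh a receive queue's byte and packet counters from the hardware
 * RQ stat registers.
 */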
1614 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1615 {
1616         struct rcv_queue *rq;
1617
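/* Stat registers sit in a per-queue block; 'reg' selects the 8-byte counter */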
1618 #define GET_RQ_STATS(reg) \
1619         nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1620                             (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1621
1622         rq = &nic->qs->rq[rq_idx];
1623         rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1624         rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1625 }
1626
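/* Refresh a send queue's byte and packet counters from the hardware
 * SQ stat registers.
 */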
1627 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1628 {
1629         struct snd_queue *sq;
1630
1631 #define GET_SQ_STATS(reg) \
1632         nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1633                             (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1634
1635         sq = &nic->qs->sq[sq_idx];
1636         sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1637         sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1638 }
1639
1640 /* Check for errors in the receive completion queue entry */
1641 int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1642 {
1643         if (netif_msg_rx_err(nic))
1644                 netdev_err(nic->netdev,
1645                            "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1646                            nic->netdev->name,
1647                            cqe_rx->err_level, cqe_rx->err_opcode);
1648
1649         switch (cqe_rx->err_opcode) {
1650         case CQ_RX_ERROP_RE_PARTIAL:
1651                 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
1652                 break;
1653         case CQ_RX_ERROP_RE_JABBER:
1654                 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
1655                 break;
1656         case CQ_RX_ERROP_RE_FCS:
1657                 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
1658                 break;
1659         case CQ_RX_ERROP_RE_RX_CTL:
1660                 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
1661                 break;
1662         case CQ_RX_ERROP_PREL2_ERR:
1663                 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
1664                 break;
1665         case CQ_RX_ERROP_L2_MAL:
1666                 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
1667                 break;
1668         case CQ_RX_ERROP_L2_OVERSIZE:
1669                 this_cpu_inc(nic->drv_stats->rx_oversize);
1670                 break;
1671         case CQ_RX_ERROP_L2_UNDERSIZE:
1672                 this_cpu_inc(nic->drv_stats->rx_undersize);
1673                 break;
1674         case CQ_RX_ERROP_L2_LENMISM:
1675                 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
1676                 break;
1677         case CQ_RX_ERROP_L2_PCLP:
1678                 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
1679                 break;
1680         case CQ_RX_ERROP_IP_NOT:
1681                 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
1682                 break;
1683         case CQ_RX_ERROP_IP_CSUM_ERR:
1684                 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
1685                 break;
1686         case CQ_RX_ERROP_IP_MAL:
1687                 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
1688                 break;
1689         case CQ_RX_ERROP_IP_MALD:
1690                 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
1691                 break;
1692         case CQ_RX_ERROP_IP_HOP:
1693                 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
1694                 break;
1695         case CQ_RX_ERROP_L3_PCLP:
1696                 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
1697                 break;
1698         case CQ_RX_ERROP_L4_MAL:
1699                 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
1700                 break;
1701         case CQ_RX_ERROP_L4_CHK:
1702                 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
1703                 break;
1704         case CQ_RX_ERROP_UDP_LEN:
1705                 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
1706                 break;
1707         case CQ_RX_ERROP_L4_PORT:
1708                 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
1709                 break;
1710         case CQ_RX_ERROP_TCP_FLAG:
1711                 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
1712                 break;
1713         case CQ_RX_ERROP_TCP_OFFSET:
1714                 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
1715                 break;
1716         case CQ_RX_ERROP_L4_PCLP:
1717                 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
1718                 break;
1719         case CQ_RX_ERROP_RBDR_TRUNC:
1720                 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
1721                 break;
1722         }
1723
1724         return 1;
1725 }
1726
1727 /* Check for errors in the send completion queue entry */
1728 int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
1729 {
1730         switch (cqe_tx->send_status) {
1731         case CQ_TX_ERROP_DESC_FAULT:
1732                 this_cpu_inc(nic->drv_stats->tx_desc_fault);
1733                 break;
1734         case CQ_TX_ERROP_HDR_CONS_ERR:
1735                 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
1736                 break;
1737         case CQ_TX_ERROP_SUBDC_ERR:
1738                 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
1739                 break;
1740         case CQ_TX_ERROP_MAX_SIZE_VIOL:
1741                 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
1742                 break;
1743         case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1744                 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
1745                 break;
1746         case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1747                 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
1748                 break;
1749         case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1750                 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
1751                 break;
1752         case CQ_TX_ERROP_LOCK_VIOL:
1753                 this_cpu_inc(nic->drv_stats->tx_lock_viol);
1754                 break;
1755         case CQ_TX_ERROP_DATA_FAULT:
1756                 this_cpu_inc(nic->drv_stats->tx_data_fault);
1757                 break;
1758         case CQ_TX_ERROP_TSTMP_CONFLICT:
1759                 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
1760                 break;
1761         case CQ_TX_ERROP_TSTMP_TIMEOUT:
1762                 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
1763                 break;
1764         case CQ_TX_ERROP_MEM_FAULT:
1765                 this_cpu_inc(nic->drv_stats->tx_mem_fault);
1766                 break;
1767         case CQ_TX_ERROP_CK_OVERLAP:
1768                 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
1769                 break;
1770         case CQ_TX_ERROP_CK_OFLOW:
1771                 this_cpu_inc(nic->drv_stats->tx_csum_overflow);
1772                 break;
1773         }
1774
1775         return 1;
1776 }