Commit | Line | Data |
---|---|---|
4863dea3 SG | 1 | /* |
2 | * Copyright (C) 2015 Cavium, Inc. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of version 2 of the GNU General Public License | |
6 | * as published by the Free Software Foundation. | |
7 | */ | |
8 | ||
9 | #include <linux/pci.h> | |
10 | #include <linux/netdevice.h> | |
11 | #include <linux/ip.h> | |
12 | #include <linux/etherdevice.h> | |
13 | #include <net/ip.h> | |
14 | #include <net/tso.h> | |
15 | ||
16 | #include "nic_reg.h" | |
17 | #include "nic.h" | |
18 | #include "q_struct.h" | |
19 | #include "nicvf_queues.h" | |
20 | ||
21 | struct rbuf_info { | |
22 | struct page *page; | |
23 | void *data; | |
24 | u64 offset; | |
25 | }; | |
26 | ||
27 | #define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) | |
28 | ||
29 | /* Poll a register for a specific value */ | |
30 | static int nicvf_poll_reg(struct nicvf *nic, int qidx, | |
31 | u64 reg, int bit_pos, int bits, int val) | |
32 | { | |
33 | u64 bit_mask; | |
34 | u64 reg_val; | |
35 | int timeout = 10; | |
36 | ||
37 | bit_mask = (1ULL << bits) - 1; | |
38 | bit_mask = (bit_mask << bit_pos); | |
39 | ||
40 | while (timeout) { | |
41 | reg_val = nicvf_queue_reg_read(nic, reg, qidx); | |
42 | if (((reg_val & bit_mask) >> bit_pos) == val) | |
43 | return 0; | |
44 | usleep_range(1000, 2000); | |
45 | timeout--; | |
46 | } | |
47 | netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); | |
48 | return 1; | |
49 | } | |
50 | ||
51 | /* Allocate memory for a queue's descriptors */ | |
52 | static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, | |
53 | int q_len, int desc_size, int align_bytes) | |
54 | { | |
55 | dmem->q_len = q_len; | |
56 | dmem->size = (desc_size * q_len) + align_bytes; | |
57 | /* Save address, need it while freeing */ | |
58 | dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, | |
59 | &dmem->dma, GFP_KERNEL); | |
60 | if (!dmem->unalign_base) | |
61 | return -ENOMEM; | |
62 | ||
63 | /* Align memory address for 'align_bytes' */ | |
64 | dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); | |
39a0dd0b | 65 | dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); |
4863dea3 SG | 66 | return 0; |
67 | } | |
68 | ||
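The base/phys_base adjustment above keeps the CPU and DMA views of the ring in step. A small worked illustration with assumed numbers, taking NICVF_ALIGNED_ADDR() to round an address up to the next 'align_bytes' boundary:

/* Illustration only, not part of the driver:
 *   dma (bus address) = 0x10000010, align_bytes = 128
 *   phys_base = round_up(0x10000010, 128) = 0x10000080   -> offset 0x70
 *   base      = unalign_base + 0x70
 * Descriptor i then sits at base + i * desc_size for the CPU and at
 * phys_base + i * desc_size for the hardware.
 */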
69 | /* Free queue's descriptor memory */ | |
70 | static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) | |
71 | { | |
72 | if (!dmem) | |
73 | return; | |
74 | ||
75 | dma_free_coherent(&nic->pdev->dev, dmem->size, | |
76 | dmem->unalign_base, dmem->dma); | |
77 | dmem->unalign_base = NULL; | |
78 | dmem->base = NULL; | |
79 | } | |
80 | ||
81 | /* Allocate buffer for packet reception | |
82 | * HW returns memory address where packet is DMA'ed but not a pointer | |
83 | * into RBDR ring, so save buffer address at the start of fragment and | |
84 | * align the start address to a cache-aligned address | |
85 | */ | |
86 | static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, | |
87 | u32 buf_len, u64 **rbuf) | |
88 | { | |
89 | u64 data; | |
90 | struct rbuf_info *rinfo; | |
91 | int order = get_order(buf_len); | |
92 | ||
93 | /* Check if request can be accommodated in the previously allocated page */ | |
94 | if (nic->rb_page) { | |
95 | if ((nic->rb_page_offset + buf_len + buf_len) > | |
96 | (PAGE_SIZE << order)) { | |
97 | nic->rb_page = NULL; | |
98 | } else { | |
99 | nic->rb_page_offset += buf_len; | |
100 | get_page(nic->rb_page); | |
101 | } | |
102 | } | |
103 | ||
104 | /* Allocate a new page */ | |
105 | if (!nic->rb_page) { | |
106 | nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); | |
107 | if (!nic->rb_page) { | |
108 | netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); | |
109 | return -ENOMEM; | |
110 | } | |
111 | nic->rb_page_offset = 0; | |
112 | } | |
113 | ||
114 | data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; | |
115 | ||
116 | /* Align buffer addr to cache line i.e. 128 bytes */ | |
117 | rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); | |
118 | /* Save page address for refcount update */ | |
119 | rinfo->page = nic->rb_page; | |
120 | /* Store start address for later retrieval */ | |
121 | rinfo->data = (void *)data; | |
122 | /* Store alignment offset */ | |
123 | rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); | |
124 | ||
125 | data += rinfo->offset; | |
126 | ||
127 | /* Give next aligned address to hw for DMA */ | |
128 | *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); | |
129 | return 0; | |
130 | } | |
131 | ||
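For reference, a sketch of the buffer layout produced above, assuming NICVF_RCV_BUF_ALIGN_BYTES is the 128-byte cache-line size referred to in the comments:

/* Illustration only (assuming 128-byte NICVF_RCV_BUF_ALIGN_BYTES):
 *
 *   data ............ pad of rinfo->offset bytes up to a 128B boundary
 *   rinfo ........... struct rbuf_info {page, data, offset}, stored at
 *                     that boundary (128 bytes are reserved for it)
 *   rinfo + 128 ..... *rbuf, the address handed to HW for DMA
 *
 * so GET_RBUF_INFO(*rbuf) steps back to the saved bookkeeping.
 */

/* Hypothetical helper (not part of the driver) showing the reverse mapping
 * that nicvf_rb_ptr_to_skb() and nicvf_free_rbdr() rely on:
 */
static inline struct page *nicvf_rbuf_to_page(u64 hw_buf_addr)
{
	/* Step back over the alignment gap to reach the saved rbuf_info */
	struct rbuf_info *rinfo = GET_RBUF_INFO(hw_buf_addr);

	return rinfo->page;
}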
132 | /* Retrieve actual buffer start address and build skb for received packet */ | |
133 | static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, | |
134 | u64 rb_ptr, int len) | |
135 | { | |
136 | struct sk_buff *skb; | |
137 | struct rbuf_info *rinfo; | |
138 | ||
139 | rb_ptr = (u64)phys_to_virt(rb_ptr); | |
140 | /* Get buffer start address and alignment offset */ | |
141 | rinfo = GET_RBUF_INFO(rb_ptr); | |
142 | ||
143 | /* Now build an skb to give to stack */ | |
144 | skb = build_skb(rinfo->data, RCV_FRAG_LEN); | |
145 | if (!skb) { | |
146 | put_page(rinfo->page); | |
147 | return NULL; | |
148 | } | |
149 | ||
150 | /* Set correct skb->data */ | |
151 | skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); | |
152 | ||
153 | prefetch((void *)rb_ptr); | |
154 | return skb; | |
155 | } | |
156 | ||
157 | /* Allocate RBDR ring and populate receive buffers */ | |
158 | static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, | |
159 | int ring_len, int buf_size) | |
160 | { | |
161 | int idx; | |
162 | u64 *rbuf; | |
163 | struct rbdr_entry_t *desc; | |
164 | int err; | |
165 | ||
166 | err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, | |
167 | sizeof(struct rbdr_entry_t), | |
168 | NICVF_RCV_BUF_ALIGN_BYTES); | |
169 | if (err) | |
170 | return err; | |
171 | ||
172 | rbdr->desc = rbdr->dmem.base; | |
173 | /* Buffer size has to be in multiples of 128 bytes */ | |
174 | rbdr->dma_size = buf_size; | |
175 | rbdr->enable = true; | |
176 | rbdr->thresh = RBDR_THRESH; | |
177 | ||
178 | nic->rb_page = NULL; | |
179 | for (idx = 0; idx < ring_len; idx++) { | |
180 | err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, | |
181 | &rbuf); | |
182 | if (err) | |
183 | return err; | |
184 | ||
185 | desc = GET_RBDR_DESC(rbdr, idx); | |
186 | desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | |
187 | } | |
188 | return 0; | |
189 | } | |
190 | ||
191 | /* Free RBDR ring and its receive buffers */ | |
192 | static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) | |
193 | { | |
194 | int head, tail; | |
195 | u64 buf_addr; | |
196 | struct rbdr_entry_t *desc; | |
197 | struct rbuf_info *rinfo; | |
198 | ||
199 | if (!rbdr) | |
200 | return; | |
201 | ||
202 | rbdr->enable = false; | |
203 | if (!rbdr->dmem.base) | |
204 | return; | |
205 | ||
206 | head = rbdr->head; | |
207 | tail = rbdr->tail; | |
208 | ||
209 | /* Release receive buffers between head and tail */ | |
210 | while (head != tail) { | |
211 | desc = GET_RBDR_DESC(rbdr, head); | |
212 | buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | |
213 | rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); | |
214 | put_page(rinfo->page); | |
215 | head++; | |
216 | head &= (rbdr->dmem.q_len - 1); | |
217 | } | |
218 | /* Release the tail desc's buffer as well */ | |
219 | desc = GET_RBDR_DESC(rbdr, tail); | |
220 | buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; | |
221 | rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); | |
222 | put_page(rinfo->page); | |
223 | ||
224 | /* Free RBDR ring */ | |
225 | nicvf_free_q_desc_mem(nic, &rbdr->dmem); | |
226 | } | |
227 | ||
228 | /* Refill receive buffer descriptors with new buffers. | |
229 | */ | |
fd7ec062 | 230 | static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) |
4863dea3 SG | 231 | { |
232 | struct queue_set *qs = nic->qs; | |
233 | int rbdr_idx = qs->rbdr_cnt; | |
234 | int tail, qcount; | |
235 | int refill_rb_cnt; | |
236 | struct rbdr *rbdr; | |
237 | struct rbdr_entry_t *desc; | |
238 | u64 *rbuf; | |
239 | int new_rb = 0; | |
240 | ||
241 | refill: | |
242 | if (!rbdr_idx) | |
243 | return; | |
244 | rbdr_idx--; | |
245 | rbdr = &qs->rbdr[rbdr_idx]; | |
246 | /* Check if it's enabled */ | |
247 | if (!rbdr->enable) | |
248 | goto next_rbdr; | |
249 | ||
250 | /* Get the number of descriptors to be refilled */ | |
251 | qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); | |
252 | qcount &= 0x7FFFF; | |
253 | /* Doorbell can be rung with a max of ring size minus 1 */ | |
254 | if (qcount >= (qs->rbdr_len - 1)) | |
255 | goto next_rbdr; | |
256 | else | |
257 | refill_rb_cnt = qs->rbdr_len - qcount - 1; | |
258 | ||
259 | /* Start filling descs from tail */ | |
260 | tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; | |
261 | while (refill_rb_cnt) { | |
262 | tail++; | |
263 | tail &= (rbdr->dmem.q_len - 1); | |
264 | ||
265 | if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) | |
266 | break; | |
267 | ||
268 | desc = GET_RBDR_DESC(rbdr, tail); | |
269 | desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; | |
270 | refill_rb_cnt--; | |
271 | new_rb++; | |
272 | } | |
273 | ||
274 | /* make sure all memory stores are done before ringing doorbell */ | |
275 | smp_wmb(); | |
276 | ||
277 | /* Check if buffer allocation failed */ | |
278 | if (refill_rb_cnt) | |
279 | nic->rb_alloc_fail = true; | |
280 | else | |
281 | nic->rb_alloc_fail = false; | |
282 | ||
283 | /* Notify HW */ | |
284 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, | |
285 | rbdr_idx, new_rb); | |
286 | next_rbdr: | |
287 | /* Re-enable RBDR interrupts only if buffer allocation was successful */ | |
288 | if (!nic->rb_alloc_fail && rbdr->enable) | |
289 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); | |
290 | ||
291 | if (rbdr_idx) | |
292 | goto refill; | |
293 | } | |
294 | ||
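The 'ring size minus 1' limit above caps how many buffers may be pushed per doorbell; a quick worked example with assumed numbers:

/* Illustration only: with qs->rbdr_len = 8192 and STATUS0 reporting
 * qcount = 8000 buffers still queued to HW, at most
 * 8192 - 8000 - 1 = 191 new buffers are added before the doorbell is
 * rung with new_rb; one slot is always left unused, which is the usual
 * way a ring distinguishes completely full from empty.
 */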
295 | /* Alloc rcv buffers in non-atomic context for a better chance of success */ | |
296 | void nicvf_rbdr_work(struct work_struct *work) | |
297 | { | |
298 | struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); | |
299 | ||
300 | nicvf_refill_rbdr(nic, GFP_KERNEL); | |
301 | if (nic->rb_alloc_fail) | |
302 | schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | |
303 | else | |
304 | nic->rb_work_scheduled = false; | |
305 | } | |
306 | ||
307 | /* In Softirq context, alloc rcv buffers in atomic mode */ | |
308 | void nicvf_rbdr_task(unsigned long data) | |
309 | { | |
310 | struct nicvf *nic = (struct nicvf *)data; | |
311 | ||
312 | nicvf_refill_rbdr(nic, GFP_ATOMIC); | |
313 | if (nic->rb_alloc_fail) { | |
314 | nic->rb_work_scheduled = true; | |
315 | schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); | |
316 | } | |
317 | } | |
318 | ||
319 | /* Initialize completion queue */ | |
320 | static int nicvf_init_cmp_queue(struct nicvf *nic, | |
321 | struct cmp_queue *cq, int q_len) | |
322 | { | |
323 | int err; | |
324 | ||
325 | err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, | |
326 | NICVF_CQ_BASE_ALIGN_BYTES); | |
327 | if (err) | |
328 | return err; | |
329 | ||
330 | cq->desc = cq->dmem.base; | |
331 | cq->thresh = CMP_QUEUE_CQE_THRESH; | |
332 | nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; | |
333 | ||
334 | return 0; | |
335 | } | |
336 | ||
337 | static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) | |
338 | { | |
339 | if (!cq) | |
340 | return; | |
341 | if (!cq->dmem.base) | |
342 | return; | |
343 | ||
344 | nicvf_free_q_desc_mem(nic, &cq->dmem); | |
345 | } | |
346 | ||
347 | /* Initialize transmit queue */ | |
348 | static int nicvf_init_snd_queue(struct nicvf *nic, | |
349 | struct snd_queue *sq, int q_len) | |
350 | { | |
351 | int err; | |
352 | ||
353 | err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, | |
354 | NICVF_SQ_BASE_ALIGN_BYTES); | |
355 | if (err) | |
356 | return err; | |
357 | ||
358 | sq->desc = sq->dmem.base; | |
359 | sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); | |
360 | sq->head = 0; | |
361 | sq->tail = 0; | |
362 | atomic_set(&sq->free_cnt, q_len - 1); | |
363 | sq->thresh = SND_QUEUE_THRESH; | |
364 | ||
365 | /* Preallocate memory for TSO segment's header */ | |
366 | sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, | |
367 | q_len * TSO_HEADER_SIZE, | |
368 | &sq->tso_hdrs_phys, GFP_KERNEL); | |
369 | if (!sq->tso_hdrs) | |
370 | return -ENOMEM; | |
371 | ||
372 | return 0; | |
373 | } | |
374 | ||
375 | static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | |
376 | { | |
377 | if (!sq) | |
378 | return; | |
379 | if (!sq->dmem.base) | |
380 | return; | |
381 | ||
382 | if (sq->tso_hdrs) | |
383 | dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len * TSO_HEADER_SIZE, | |
384 | sq->tso_hdrs, sq->tso_hdrs_phys); | |
385 | ||
386 | kfree(sq->skbuff); | |
387 | nicvf_free_q_desc_mem(nic, &sq->dmem); | |
388 | } | |
389 | ||
390 | static void nicvf_reclaim_snd_queue(struct nicvf *nic, | |
391 | struct queue_set *qs, int qidx) | |
392 | { | |
393 | /* Disable send queue */ | |
394 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); | |
395 | /* Check if SQ is stopped */ | |
396 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) | |
397 | return; | |
398 | /* Reset send queue */ | |
399 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); | |
400 | } | |
401 | ||
402 | static void nicvf_reclaim_rcv_queue(struct nicvf *nic, | |
403 | struct queue_set *qs, int qidx) | |
404 | { | |
405 | union nic_mbx mbx = {}; | |
406 | ||
407 | /* Make sure all packets in the pipeline are written back into mem */ | |
408 | mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; | |
409 | nicvf_send_msg_to_pf(nic, &mbx); | |
410 | } | |
411 | ||
412 | static void nicvf_reclaim_cmp_queue(struct nicvf *nic, | |
413 | struct queue_set *qs, int qidx) | |
414 | { | |
415 | /* Disable timer threshold (doesn't get reset upon CQ reset) */ | |
416 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); | |
417 | /* Disable completion queue */ | |
418 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); | |
419 | /* Reset completion queue */ | |
420 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); | |
421 | } | |
422 | ||
423 | static void nicvf_reclaim_rbdr(struct nicvf *nic, | |
424 | struct rbdr *rbdr, int qidx) | |
425 | { | |
426 | u64 tmp, fifo_state; | |
427 | int timeout = 10; | |
428 | ||
429 | /* Save head and tail pointers for freeing up buffers */ | |
430 | rbdr->head = nicvf_queue_reg_read(nic, | |
431 | NIC_QSET_RBDR_0_1_HEAD, | |
432 | qidx) >> 3; | |
433 | rbdr->tail = nicvf_queue_reg_read(nic, | |
434 | NIC_QSET_RBDR_0_1_TAIL, | |
435 | qidx) >> 3; | |
436 | ||
437 | /* If RBDR FIFO is in 'FAIL' state then do a reset first | |
438 | * before reclaiming. | |
439 | */ | |
440 | fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); | |
441 | if (((fifo_state >> 62) & 0x03) == 0x3) | |
442 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | |
443 | qidx, NICVF_RBDR_RESET); | |
444 | ||
445 | /* Disable RBDR */ | |
446 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); | |
447 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) | |
448 | return; | |
449 | while (1) { | |
450 | tmp = nicvf_queue_reg_read(nic, | |
451 | NIC_QSET_RBDR_0_1_PREFETCH_STATUS, | |
452 | qidx); | |
453 | if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) | |
454 | break; | |
455 | usleep_range(1000, 2000); | |
456 | timeout--; | |
457 | if (!timeout) { | |
458 | netdev_err(nic->netdev, | |
459 | "Failed polling on prefetch status\n"); | |
460 | return; | |
461 | } | |
462 | } | |
463 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | |
464 | qidx, NICVF_RBDR_RESET); | |
465 | ||
466 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) | |
467 | return; | |
468 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); | |
469 | if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) | |
470 | return; | |
471 | } | |
472 | ||
473 | /* Configures receive queue */ | |
474 | static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, | |
475 | int qidx, bool enable) | |
476 | { | |
477 | union nic_mbx mbx = {}; | |
478 | struct rcv_queue *rq; | |
479 | struct rq_cfg rq_cfg; | |
480 | ||
481 | rq = &qs->rq[qidx]; | |
482 | rq->enable = enable; | |
483 | ||
484 | /* Disable receive queue */ | |
485 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); | |
486 | ||
487 | if (!rq->enable) { | |
488 | nicvf_reclaim_rcv_queue(nic, qs, qidx); | |
489 | return; | |
490 | } | |
491 | ||
492 | rq->cq_qs = qs->vnic_id; | |
493 | rq->cq_idx = qidx; | |
494 | rq->start_rbdr_qs = qs->vnic_id; | |
495 | rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; | |
496 | rq->cont_rbdr_qs = qs->vnic_id; | |
497 | rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; | |
498 | /* All writes of RBDR data are to be loaded into the L2 cache as well */ | |
499 | rq->caching = 1; | |
500 | ||
501 | /* Send a mailbox msg to PF to config RQ */ | |
502 | mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; | |
503 | mbx.rq.qs_num = qs->vnic_id; | |
504 | mbx.rq.rq_num = qidx; | |
505 | mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | | |
506 | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | | |
507 | (rq->cont_qs_rbdr_idx << 8) | | |
508 | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); | |
509 | nicvf_send_msg_to_pf(nic, &mbx); | |
510 | ||
511 | mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; | |
512 | mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); | |
513 | nicvf_send_msg_to_pf(nic, &mbx); | |
514 | ||
515 | /* RQ drop config | |
516 | * Enable CQ drop to reserve sufficient CQEs for all tx packets | |
517 | */ | |
518 | mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; | |
519 | mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); | |
520 | nicvf_send_msg_to_pf(nic, &mbx); | |
521 | ||
522 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); | |
523 | ||
524 | /* Enable Receive queue */ | |
525 | rq_cfg.ena = 1; | |
526 | rq_cfg.tcp_ena = 0; | |
527 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); | |
528 | } | |
529 | ||
530 | /* Configures completion queue */ | |
531 | void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, | |
532 | int qidx, bool enable) | |
533 | { | |
534 | struct cmp_queue *cq; | |
535 | struct cq_cfg cq_cfg; | |
536 | ||
537 | cq = &qs->cq[qidx]; | |
538 | cq->enable = enable; | |
539 | ||
540 | if (!cq->enable) { | |
541 | nicvf_reclaim_cmp_queue(nic, qs, qidx); | |
542 | return; | |
543 | } | |
544 | ||
545 | /* Reset completion queue */ | |
546 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); | |
547 | ||
548 | if (!cq->enable) | |
549 | return; | |
550 | ||
551 | spin_lock_init(&cq->lock); | |
552 | /* Set completion queue base address */ | |
553 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, | |
554 | qidx, (u64)(cq->dmem.phys_base)); | |
555 | ||
556 | /* Enable Completion queue */ | |
557 | cq_cfg.ena = 1; | |
558 | cq_cfg.reset = 0; | |
559 | cq_cfg.caching = 0; | |
560 | cq_cfg.qsize = CMP_QSIZE; | |
561 | cq_cfg.avg_con = 0; | |
562 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); | |
563 | ||
564 | /* Set threshold value for interrupt generation */ | |
565 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); | |
566 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, | |
567 | qidx, nic->cq_coalesce_usecs); | |
568 | } | |
569 | ||
570 | /* Configures transmit queue */ | |
571 | static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, | |
572 | int qidx, bool enable) | |
573 | { | |
574 | union nic_mbx mbx = {}; | |
575 | struct snd_queue *sq; | |
576 | struct sq_cfg sq_cfg; | |
577 | ||
578 | sq = &qs->sq[qidx]; | |
579 | sq->enable = enable; | |
580 | ||
581 | if (!sq->enable) { | |
582 | nicvf_reclaim_snd_queue(nic, qs, qidx); | |
583 | return; | |
584 | } | |
585 | ||
586 | /* Reset send queue */ | |
587 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); | |
588 | ||
589 | sq->cq_qs = qs->vnic_id; | |
590 | sq->cq_idx = qidx; | |
591 | ||
592 | /* Send a mailbox msg to PF to config SQ */ | |
593 | mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; | |
594 | mbx.sq.qs_num = qs->vnic_id; | |
595 | mbx.sq.sq_num = qidx; | |
596 | mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; | |
597 | nicvf_send_msg_to_pf(nic, &mbx); | |
598 | ||
599 | /* Set queue base address */ | |
600 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, | |
601 | qidx, (u64)(sq->dmem.phys_base)); | |
602 | ||
603 | /* Enable send queue & set queue size */ | |
604 | sq_cfg.ena = 1; | |
605 | sq_cfg.reset = 0; | |
606 | sq_cfg.ldwb = 0; | |
607 | sq_cfg.qsize = SND_QSIZE; | |
608 | sq_cfg.tstmp_bgx_intf = 0; | |
609 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); | |
610 | ||
611 | /* Set threshold value for interrupt generation */ | |
612 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); | |
613 | ||
614 | /* Set queue:cpu affinity for better load distribution */ | |
615 | if (cpu_online(qidx)) { | |
616 | cpumask_set_cpu(qidx, &sq->affinity_mask); | |
617 | netif_set_xps_queue(nic->netdev, | |
618 | &sq->affinity_mask, qidx); | |
619 | } | |
620 | } | |
621 | ||
622 | /* Configures receive buffer descriptor ring */ | |
623 | static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, | |
624 | int qidx, bool enable) | |
625 | { | |
626 | struct rbdr *rbdr; | |
627 | struct rbdr_cfg rbdr_cfg; | |
628 | ||
629 | rbdr = &qs->rbdr[qidx]; | |
630 | nicvf_reclaim_rbdr(nic, rbdr, qidx); | |
631 | if (!enable) | |
632 | return; | |
633 | ||
634 | /* Set descriptor base address */ | |
635 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, | |
636 | qidx, (u64)(rbdr->dmem.phys_base)); | |
637 | ||
638 | /* Enable RBDR & set queue size */ | |
639 | /* Buffer size should be in multiples of 128 bytes */ | |
640 | rbdr_cfg.ena = 1; | |
641 | rbdr_cfg.reset = 0; | |
642 | rbdr_cfg.ldwb = 0; | |
643 | rbdr_cfg.qsize = RBDR_SIZE; | |
644 | rbdr_cfg.avg_con = 0; | |
645 | rbdr_cfg.lines = rbdr->dma_size / 128; | |
646 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, | |
647 | qidx, *(u64 *)&rbdr_cfg); | |
648 | ||
649 | /* Notify HW */ | |
650 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, | |
651 | qidx, qs->rbdr_len - 1); | |
652 | ||
653 | /* Set threshold value for interrupt generation */ | |
654 | nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, | |
655 | qidx, rbdr->thresh - 1); | |
656 | } | |
657 | ||
658 | /* Requests PF to assign and enable Qset */ | |
659 | void nicvf_qset_config(struct nicvf *nic, bool enable) | |
660 | { | |
661 | union nic_mbx mbx = {}; | |
662 | struct queue_set *qs = nic->qs; | |
663 | struct qs_cfg *qs_cfg; | |
664 | ||
665 | if (!qs) { | |
666 | netdev_warn(nic->netdev, | |
667 | "Qset is still not allocated, don't init queues\n"); | |
668 | return; | |
669 | } | |
670 | ||
671 | qs->enable = enable; | |
672 | qs->vnic_id = nic->vf_id; | |
673 | ||
674 | /* Send a mailbox msg to PF to config Qset */ | |
675 | mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; | |
676 | mbx.qs.num = qs->vnic_id; | |
677 | ||
678 | mbx.qs.cfg = 0; | |
679 | qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; | |
680 | if (qs->enable) { | |
681 | qs_cfg->ena = 1; | |
682 | #ifdef __BIG_ENDIAN | |
683 | qs_cfg->be = 1; | |
684 | #endif | |
685 | qs_cfg->vnic = qs->vnic_id; | |
686 | } | |
687 | nicvf_send_msg_to_pf(nic, &mbx); | |
688 | } | |
689 | ||
690 | static void nicvf_free_resources(struct nicvf *nic) | |
691 | { | |
692 | int qidx; | |
693 | struct queue_set *qs = nic->qs; | |
694 | ||
695 | /* Free receive buffer descriptor ring */ | |
696 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
697 | nicvf_free_rbdr(nic, &qs->rbdr[qidx]); | |
698 | ||
699 | /* Free completion queue */ | |
700 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
701 | nicvf_free_cmp_queue(nic, &qs->cq[qidx]); | |
702 | ||
703 | /* Free send queue */ | |
704 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
705 | nicvf_free_snd_queue(nic, &qs->sq[qidx]); | |
706 | } | |
707 | ||
708 | static int nicvf_alloc_resources(struct nicvf *nic) | |
709 | { | |
710 | int qidx; | |
711 | struct queue_set *qs = nic->qs; | |
712 | ||
713 | /* Alloc receive buffer descriptor ring */ | |
714 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { | |
715 | if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, | |
716 | DMA_BUFFER_LEN)) | |
717 | goto alloc_fail; | |
718 | } | |
719 | ||
720 | /* Alloc send queue */ | |
721 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) { | |
722 | if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) | |
723 | goto alloc_fail; | |
724 | } | |
725 | ||
726 | /* Alloc completion queue */ | |
727 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) { | |
728 | if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) | |
729 | goto alloc_fail; | |
730 | } | |
731 | ||
732 | return 0; | |
733 | alloc_fail: | |
734 | nicvf_free_resources(nic); | |
735 | return -ENOMEM; | |
736 | } | |
737 | ||
738 | int nicvf_set_qset_resources(struct nicvf *nic) | |
739 | { | |
740 | struct queue_set *qs; | |
741 | ||
742 | qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); | |
743 | if (!qs) | |
744 | return -ENOMEM; | |
745 | nic->qs = qs; | |
746 | ||
747 | /* Set count of each queue */ | |
748 | qs->rbdr_cnt = RBDR_CNT; | |
749 | qs->rq_cnt = RCV_QUEUE_CNT; | |
750 | qs->sq_cnt = SND_QUEUE_CNT; | |
751 | qs->cq_cnt = CMP_QUEUE_CNT; | |
752 | ||
753 | /* Set queue lengths */ | |
754 | qs->rbdr_len = RCV_BUF_COUNT; | |
755 | qs->sq_len = SND_QUEUE_LEN; | |
756 | qs->cq_len = CMP_QUEUE_LEN; | |
757 | return 0; | |
758 | } | |
759 | ||
760 | int nicvf_config_data_transfer(struct nicvf *nic, bool enable) | |
761 | { | |
762 | bool disable = false; | |
763 | struct queue_set *qs = nic->qs; | |
764 | int qidx; | |
765 | ||
766 | if (!qs) | |
767 | return 0; | |
768 | ||
769 | if (enable) { | |
770 | if (nicvf_alloc_resources(nic)) | |
771 | return -ENOMEM; | |
772 | ||
773 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
774 | nicvf_snd_queue_config(nic, qs, qidx, enable); | |
775 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
776 | nicvf_cmp_queue_config(nic, qs, qidx, enable); | |
777 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
778 | nicvf_rbdr_config(nic, qs, qidx, enable); | |
779 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | |
780 | nicvf_rcv_queue_config(nic, qs, qidx, enable); | |
781 | } else { | |
782 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | |
783 | nicvf_rcv_queue_config(nic, qs, qidx, disable); | |
784 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | |
785 | nicvf_rbdr_config(nic, qs, qidx, disable); | |
786 | for (qidx = 0; qidx < qs->sq_cnt; qidx++) | |
787 | nicvf_snd_queue_config(nic, qs, qidx, disable); | |
788 | for (qidx = 0; qidx < qs->cq_cnt; qidx++) | |
789 | nicvf_cmp_queue_config(nic, qs, qidx, disable); | |
790 | ||
791 | nicvf_free_resources(nic); | |
792 | } | |
793 | ||
794 | return 0; | |
795 | } | |
796 | ||
797 | /* Get a free desc from SQ | |
798 | * returns the descriptor number (qentry) | |
799 | */ | |
800 | static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) | |
801 | { | |
802 | int qentry; | |
803 | ||
804 | qentry = sq->tail; | |
805 | atomic_sub(desc_cnt, &sq->free_cnt); | |
806 | sq->tail += desc_cnt; | |
807 | sq->tail &= (sq->dmem.q_len - 1); | |
808 | ||
809 | return qentry; | |
810 | } | |
811 | ||
812 | /* Free descriptor back to SQ for future use */ | |
813 | void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) | |
814 | { | |
815 | atomic_add(desc_cnt, &sq->free_cnt); | |
816 | sq->head += desc_cnt; | |
817 | sq->head &= (sq->dmem.q_len - 1); | |
818 | } | |
819 | ||
820 | static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) | |
821 | { | |
822 | qentry++; | |
823 | qentry &= (sq->dmem.q_len - 1); | |
824 | return qentry; | |
825 | } | |
826 | ||
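The descriptor accounting above assumes the SQ length is a power of two, so '& (q_len - 1)' wraps indices cheaply. A short illustration with an assumed q_len of 1024:

/* Illustration only:
 *   tail = 1022, desc_cnt = 4
 *   nicvf_get_sq_desc() returns qentry = 1022 and moves tail to
 *   (1022 + 4) & 1023 = 2, so the packet occupies entries 1022, 1023,
 *   0 and 1; free_cnt drops by 4 and is given back through
 *   nicvf_put_sq_desc() once the hardware has consumed the descriptors.
 */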
827 | void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) | |
828 | { | |
829 | u64 sq_cfg; | |
830 | ||
831 | sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); | |
832 | sq_cfg |= NICVF_SQ_EN; | |
833 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); | |
834 | /* Ring doorbell so that H/W restarts processing SQEs */ | |
835 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); | |
836 | } | |
837 | ||
838 | void nicvf_sq_disable(struct nicvf *nic, int qidx) | |
839 | { | |
840 | u64 sq_cfg; | |
841 | ||
842 | sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); | |
843 | sq_cfg &= ~NICVF_SQ_EN; | |
844 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); | |
845 | } | |
846 | ||
847 | void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, | |
848 | int qidx) | |
849 | { | |
850 | u64 head, tail; | |
851 | struct sk_buff *skb; | |
852 | struct nicvf *nic = netdev_priv(netdev); | |
853 | struct sq_hdr_subdesc *hdr; | |
854 | ||
855 | head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; | |
856 | tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; | |
857 | while (sq->head != head) { | |
858 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); | |
859 | if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { | |
860 | nicvf_put_sq_desc(sq, 1); | |
861 | continue; | |
862 | } | |
863 | skb = (struct sk_buff *)sq->skbuff[sq->head]; | |
864 | atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); | |
865 | atomic64_add(hdr->tot_len, | |
866 | (atomic64_t *)&netdev->stats.tx_bytes); | |
867 | dev_kfree_skb_any(skb); | |
868 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); | |
869 | } | |
870 | } | |
871 | ||
872 | /* Calculate the number of SQ subdescriptors needed to transmit all | |
873 | * segments of this TSO packet. | |
874 | * Taken from 'Tilera network driver' with a minor modification. | |
875 | */ | |
876 | static int nicvf_tso_count_subdescs(struct sk_buff *skb) | |
877 | { | |
878 | struct skb_shared_info *sh = skb_shinfo(skb); | |
879 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
880 | unsigned int data_len = skb->len - sh_len; | |
881 | unsigned int p_len = sh->gso_size; | |
882 | long f_id = -1; /* id of the current fragment */ | |
883 | long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ | |
884 | long f_used = 0; /* bytes used from the current fragment */ | |
885 | long n; /* size of the current piece of payload */ | |
886 | int num_edescs = 0; | |
887 | int segment; | |
888 | ||
889 | for (segment = 0; segment < sh->gso_segs; segment++) { | |
890 | unsigned int p_used = 0; | |
891 | ||
892 | /* One edesc for header and for each piece of the payload. */ | |
893 | for (num_edescs++; p_used < p_len; num_edescs++) { | |
894 | /* Advance as needed. */ | |
895 | while (f_used >= f_size) { | |
896 | f_id++; | |
897 | f_size = skb_frag_size(&sh->frags[f_id]); | |
898 | f_used = 0; | |
899 | } | |
900 | ||
901 | /* Use bytes from the current fragment. */ | |
902 | n = p_len - p_used; | |
903 | if (n > f_size - f_used) | |
904 | n = f_size - f_used; | |
905 | f_used += n; | |
906 | p_used += n; | |
907 | } | |
908 | ||
909 | /* The last segment may be less than gso_size. */ | |
910 | data_len -= p_len; | |
911 | if (data_len < p_len) | |
912 | p_len = data_len; | |
913 | } | |
914 | ||
915 | /* '+ gso_segs' for the SQ_HDR_SUBDESC of each segment */ | |
916 | return num_edescs + sh->gso_segs; | |
917 | } | |
918 | ||
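A worked example of the count, with illustrative numbers:

/* Illustration only: a linear TSO skb with 3000 bytes of payload and
 * gso_size = 1448 has gso_segs = 3 (1448 + 1448 + 104).  The loops above
 * count one edesc per segment for its header copy plus one per payload
 * piece (here a single piece per segment), i.e. num_edescs = 6, and the
 * final '+ gso_segs' adds the three SQ_HDR subdescriptors, giving 9 in
 * total, matching the HDR + header GATHER + payload GATHER chain that
 * nicvf_sq_append_tso() emits per segment.
 */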
919 | /* Get the number of SQ descriptors needed to xmit this skb */ | |
920 | static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) | |
921 | { | |
922 | int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; | |
923 | ||
924 | if (skb_shinfo(skb)->gso_size) { | |
925 | subdesc_cnt = nicvf_tso_count_subdescs(skb); | |
926 | return subdesc_cnt; | |
927 | } | |
928 | ||
929 | if (skb_shinfo(skb)->nr_frags) | |
930 | subdesc_cnt += skb_shinfo(skb)->nr_frags; | |
931 | ||
932 | return subdesc_cnt; | |
933 | } | |
934 | ||
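For the non-TSO path, a quick illustration, assuming MIN_SQ_DESC_PER_PKT_XMIT (defined in nicvf_queues.h) covers the HDR subdescriptor plus one GATHER for the linear data:

/* Illustration only: a non-TSO skb with two page fragments needs
 * MIN_SQ_DESC_PER_PKT_XMIT + 2 subdescriptors:
 *   HDR, GATHER(linear head), GATHER(frag 0), GATHER(frag 1)
 * which is exactly the chain nicvf_sq_append_skb() builds below.
 */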
935 | /* Add SQ HEADER subdescriptor. | |
936 | * First subdescriptor for every send descriptor. | |
937 | */ | |
938 | static inline void | |
939 | nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, | |
940 | int subdesc_cnt, struct sk_buff *skb, int len) | |
941 | { | |
942 | int proto; | |
943 | struct sq_hdr_subdesc *hdr; | |
944 | ||
945 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); | |
946 | sq->skbuff[qentry] = (u64)skb; | |
947 | ||
948 | memset(hdr, 0, SND_QUEUE_DESC_SIZE); | |
949 | hdr->subdesc_type = SQ_DESC_TYPE_HEADER; | |
950 | /* Enable notification via CQE after processing SQE */ | |
951 | hdr->post_cqe = 1; | |
952 | /* Number of subdescriptors following this one */ | |
953 | hdr->subdesc_cnt = subdesc_cnt; | |
954 | hdr->tot_len = len; | |
955 | ||
956 | /* Offload checksum calculation to HW */ | |
957 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
958 | if (skb->protocol != htons(ETH_P_IP)) | |
959 | return; | |
960 | ||
961 | hdr->csum_l3 = 1; /* Enable IP csum calculation */ | |
962 | hdr->l3_offset = skb_network_offset(skb); | |
963 | hdr->l4_offset = skb_transport_offset(skb); | |
964 | ||
965 | proto = ip_hdr(skb)->protocol; | |
966 | switch (proto) { | |
967 | case IPPROTO_TCP: | |
968 | hdr->csum_l4 = SEND_L4_CSUM_TCP; | |
969 | break; | |
970 | case IPPROTO_UDP: | |
971 | hdr->csum_l4 = SEND_L4_CSUM_UDP; | |
972 | break; | |
973 | case IPPROTO_SCTP: | |
974 | hdr->csum_l4 = SEND_L4_CSUM_SCTP; | |
975 | break; | |
976 | } | |
977 | } | |
978 | } | |
979 | ||
980 | /* SQ GATHER subdescriptor | |
981 | * Must follow HDR descriptor | |
982 | */ | |
983 | static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, | |
984 | int size, u64 data) | |
985 | { | |
986 | struct sq_gather_subdesc *gather; | |
987 | ||
988 | qentry &= (sq->dmem.q_len - 1); | |
989 | gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); | |
990 | ||
991 | memset(gather, 0, SND_QUEUE_DESC_SIZE); | |
992 | gather->subdesc_type = SQ_DESC_TYPE_GATHER; | |
993 | gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; | |
994 | gather->size = size; | |
995 | gather->addr = data; | |
996 | } | |
997 | ||
998 | /* Segment a TSO packet into segments of 'gso_size' bytes and append | |
999 | * them to SQ for transfer | |
1000 | */ | |
1001 | static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, | |
1002 | int qentry, struct sk_buff *skb) | |
1003 | { | |
1004 | struct tso_t tso; | |
1005 | int seg_subdescs = 0, desc_cnt = 0; | |
1006 | int seg_len, total_len, data_left; | |
1007 | int hdr_qentry = qentry; | |
1008 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
1009 | ||
1010 | tso_start(skb, &tso); | |
1011 | total_len = skb->len - hdr_len; | |
1012 | while (total_len > 0) { | |
1013 | char *hdr; | |
1014 | ||
1015 | /* Save Qentry for adding HDR_SUBDESC at the end */ | |
1016 | hdr_qentry = qentry; | |
1017 | ||
1018 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); | |
1019 | total_len -= data_left; | |
1020 | ||
1021 | /* Add segment's header */ | |
1022 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1023 | hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; | |
1024 | tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); | |
1025 | nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, | |
1026 | sq->tso_hdrs_phys + | |
1027 | qentry * TSO_HEADER_SIZE); | |
1028 | /* HDR_SUBDESC + GATHER */ | |
1029 | seg_subdescs = 2; | |
1030 | seg_len = hdr_len; | |
1031 | ||
1032 | /* Add segment's payload fragments */ | |
1033 | while (data_left > 0) { | |
1034 | int size; | |
1035 | ||
1036 | size = min_t(int, tso.size, data_left); | |
1037 | ||
1038 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1039 | nicvf_sq_add_gather_subdesc(sq, qentry, size, | |
1040 | virt_to_phys(tso.data)); | |
1041 | seg_subdescs++; | |
1042 | seg_len += size; | |
1043 | ||
1044 | data_left -= size; | |
1045 | tso_build_data(skb, &tso, size); | |
1046 | } | |
1047 | nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, | |
1048 | seg_subdescs - 1, skb, seg_len); | |
1049 | sq->skbuff[hdr_qentry] = 0; | |
1050 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1051 | ||
1052 | desc_cnt += seg_subdescs; | |
1053 | } | |
1054 | /* Save SKB in the last segment for freeing */ | |
1055 | sq->skbuff[hdr_qentry] = (u64)skb; | |
1056 | ||
1057 | /* make sure all memory stores are done before ringing doorbell */ | |
1058 | smp_wmb(); | |
1059 | ||
1060 | /* Inform HW to xmit all TSO segments */ | |
1061 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, | |
1062 | skb_get_queue_mapping(skb), desc_cnt); | |
1063 | return 1; | |
1064 | } | |
1065 | ||
1066 | /* Append an skb to a SQ for packet transfer. */ | |
1067 | int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) | |
1068 | { | |
1069 | int i, size; | |
1070 | int subdesc_cnt; | |
1071 | int sq_num, qentry; | |
1072 | struct queue_set *qs = nic->qs; | |
1073 | struct snd_queue *sq; | |
1074 | ||
1075 | sq_num = skb_get_queue_mapping(skb); | |
1076 | sq = &qs->sq[sq_num]; | |
1077 | ||
1078 | subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); | |
1079 | if (subdesc_cnt > atomic_read(&sq->free_cnt)) | |
1080 | goto append_fail; | |
1081 | ||
1082 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); | |
1083 | ||
1084 | /* Check if it's a TSO packet */ | |
1085 | if (skb_shinfo(skb)->gso_size) | |
1086 | return nicvf_sq_append_tso(nic, sq, qentry, skb); | |
1087 | ||
1088 | /* Add SQ header subdesc */ | |
1089 | nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); | |
1090 | ||
1091 | /* Add SQ gather subdescs */ | |
1092 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1093 | size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; | |
1094 | nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); | |
1095 | ||
1096 | /* Check for scattered buffer */ | |
1097 | if (!skb_is_nonlinear(skb)) | |
1098 | goto doorbell; | |
1099 | ||
1100 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1101 | const struct skb_frag_struct *frag; | |
1102 | ||
1103 | frag = &skb_shinfo(skb)->frags[i]; | |
1104 | ||
1105 | qentry = nicvf_get_nxt_sqentry(sq, qentry); | |
1106 | size = skb_frag_size(frag); | |
1107 | nicvf_sq_add_gather_subdesc(sq, qentry, size, | |
1108 | virt_to_phys( | |
1109 | skb_frag_address(frag))); | |
1110 | } | |
1111 | ||
1112 | doorbell: | |
1113 | /* make sure all memory stores are done before ringing doorbell */ | |
1114 | smp_wmb(); | |
1115 | ||
1116 | /* Inform HW to xmit new packet */ | |
1117 | nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, | |
1118 | sq_num, subdesc_cnt); | |
1119 | return 1; | |
1120 | ||
1121 | append_fail: | |
1122 | netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); | |
1123 | return 0; | |
1124 | } | |
1125 | ||
1126 | static inline unsigned frag_num(unsigned i) | |
1127 | { | |
1128 | #ifdef __BIG_ENDIAN | |
1129 | return (i & ~3) + 3 - (i & 3); | |
1130 | #else | |
1131 | return i; | |
1132 | #endif | |
1133 | } | |
1134 | ||
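frag_num() corrects for how the per-fragment lengths are packed: rb_lens points at 16-bit fields read out of 64-bit CQE words, so on a big-endian host the four u16 values inside each word are seen in reverse order. A short illustration:

/* Illustration only (big-endian case):
 *   frag_num(0) = 3, frag_num(1) = 2, frag_num(2) = 1, frag_num(3) = 0,
 *   frag_num(4) = 7, frag_num(5) = 6, ...
 * i.e. indices are mirrored within each group of four, while on
 * little-endian frag_num(i) == i.
 */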
1135 | /* Returns SKB for a received packet */ | |
1136 | struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | |
1137 | { | |
1138 | int frag; | |
1139 | int payload_len = 0; | |
1140 | struct sk_buff *skb = NULL; | |
1141 | struct sk_buff *skb_frag = NULL; | |
1142 | struct sk_buff *prev_frag = NULL; | |
1143 | u16 *rb_lens = NULL; | |
1144 | u64 *rb_ptrs = NULL; | |
1145 | ||
1146 | rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); | |
1147 | rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); | |
1148 | ||
1149 | netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", | |
1150 | __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); | |
1151 | ||
1152 | for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { | |
1153 | payload_len = rb_lens[frag_num(frag)]; | |
1154 | if (!frag) { | |
1155 | /* First fragment */ | |
1156 | skb = nicvf_rb_ptr_to_skb(nic, | |
1157 | *rb_ptrs - cqe_rx->align_pad, | |
1158 | payload_len); | |
1159 | if (!skb) | |
1160 | return NULL; | |
1161 | skb_reserve(skb, cqe_rx->align_pad); | |
1162 | skb_put(skb, payload_len); | |
1163 | } else { | |
1164 | /* Add fragments */ | |
1165 | skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, | |
1166 | payload_len); | |
1167 | if (!skb_frag) { | |
1168 | dev_kfree_skb(skb); | |
1169 | return NULL; | |
1170 | } | |
1171 | ||
1172 | if (!skb_shinfo(skb)->frag_list) | |
1173 | skb_shinfo(skb)->frag_list = skb_frag; | |
1174 | else | |
1175 | prev_frag->next = skb_frag; | |
1176 | ||
1177 | prev_frag = skb_frag; | |
1178 | skb->len += payload_len; | |
1179 | skb->data_len += payload_len; | |
1180 | skb_frag->len = payload_len; | |
1181 | } | |
1182 | /* Next buffer pointer */ | |
1183 | rb_ptrs++; | |
1184 | } | |
1185 | return skb; | |
1186 | } | |
1187 | ||
1188 | /* Enable interrupt */ | |
1189 | void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) | |
1190 | { | |
1191 | u64 reg_val; | |
1192 | ||
1193 | reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | |
1194 | ||
1195 | switch (int_type) { | |
1196 | case NICVF_INTR_CQ: | |
1197 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1198 | break; | |
1199 | case NICVF_INTR_SQ: | |
1200 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1201 | break; | |
1202 | case NICVF_INTR_RBDR: | |
1203 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1204 | break; | |
1205 | case NICVF_INTR_PKT_DROP: | |
1206 | reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | |
1207 | break; | |
1208 | case NICVF_INTR_TCP_TIMER: | |
1209 | reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | |
1210 | break; | |
1211 | case NICVF_INTR_MBOX: | |
1212 | reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | |
1213 | break; | |
1214 | case NICVF_INTR_QS_ERR: | |
1215 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | |
1216 | break; | |
1217 | default: | |
1218 | netdev_err(nic->netdev, | |
1219 | "Failed to enable interrupt: unknown type\n"); | |
1220 | break; | |
1221 | } | |
1222 | ||
1223 | nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); | |
1224 | } | |
1225 | ||
1226 | /* Disable interrupt */ | |
1227 | void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) | |
1228 | { | |
1229 | u64 reg_val = 0; | |
1230 | ||
1231 | switch (int_type) { | |
1232 | case NICVF_INTR_CQ: | |
1233 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1234 | break; | |
1235 | case NICVF_INTR_SQ: | |
1236 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1237 | break; | |
1238 | case NICVF_INTR_RBDR: | |
1239 | reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1240 | break; | |
1241 | case NICVF_INTR_PKT_DROP: | |
1242 | reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | |
1243 | break; | |
1244 | case NICVF_INTR_TCP_TIMER: | |
1245 | reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | |
1246 | break; | |
1247 | case NICVF_INTR_MBOX: | |
1248 | reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); | |
1249 | break; | |
1250 | case NICVF_INTR_QS_ERR: | |
1251 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | |
1252 | break; | |
1253 | default: | |
1254 | netdev_err(nic->netdev, | |
1255 | "Failed to disable interrupt: unknown type\n"); | |
1256 | break; | |
1257 | } | |
1258 | ||
1259 | nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); | |
1260 | } | |
1261 | ||
1262 | /* Clear interrupt */ | |
1263 | void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) | |
1264 | { | |
1265 | u64 reg_val = 0; | |
1266 | ||
1267 | switch (int_type) { | |
1268 | case NICVF_INTR_CQ: | |
1269 | reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1270 | break; | |
1271 | case NICVF_INTR_SQ: | |
1272 | reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1273 | break; | |
1274 | case NICVF_INTR_RBDR: | |
1275 | reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1276 | break; | |
1277 | case NICVF_INTR_PKT_DROP: | |
1278 | reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); | |
1279 | break; | |
1280 | case NICVF_INTR_TCP_TIMER: | |
1281 | reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); | |
1282 | break; | |
1283 | case NICVF_INTR_MBOX: | |
1284 | reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); | |
1285 | break; | |
1286 | case NICVF_INTR_QS_ERR: | |
1287 | reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); | |
1288 | break; | |
1289 | default: | |
1290 | netdev_err(nic->netdev, | |
1291 | "Failed to clear interrupt: unknown type\n"); | |
1292 | break; | |
1293 | } | |
1294 | ||
1295 | nicvf_reg_write(nic, NIC_VF_INT, reg_val); | |
1296 | } | |
1297 | ||
1298 | /* Check if interrupt is enabled */ | |
1299 | int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) | |
1300 | { | |
1301 | u64 reg_val; | |
1302 | u64 mask = 0xff; | |
1303 | ||
1304 | reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); | |
1305 | ||
1306 | switch (int_type) { | |
1307 | case NICVF_INTR_CQ: | |
1308 | mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); | |
1309 | break; | |
1310 | case NICVF_INTR_SQ: | |
1311 | mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); | |
1312 | break; | |
1313 | case NICVF_INTR_RBDR: | |
1314 | mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); | |
1315 | break; | |
1316 | case NICVF_INTR_PKT_DROP: | |
1317 | mask = NICVF_INTR_PKT_DROP_MASK; | |
1318 | break; | |
1319 | case NICVF_INTR_TCP_TIMER: | |
1320 | mask = NICVF_INTR_TCP_TIMER_MASK; | |
1321 | break; | |
1322 | case NICVF_INTR_MBOX: | |
1323 | mask = NICVF_INTR_MBOX_MASK; | |
1324 | break; | |
1325 | case NICVF_INTR_QS_ERR: | |
1326 | mask = NICVF_INTR_QS_ERR_MASK; | |
1327 | break; | |
1328 | default: | |
1329 | netdev_err(nic->netdev, | |
1330 | "Failed to check interrupt enable: unknown type\n"); | |
1331 | break; | |
1332 | } | |
1333 | ||
1334 | return (reg_val & mask); | |
1335 | } | |
1336 | ||
1337 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) | |
1338 | { | |
1339 | struct rcv_queue *rq; | |
1340 | ||
1341 | #define GET_RQ_STATS(reg) \ | |
1342 | nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ | |
1343 | (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) | |
1344 | ||
1345 | rq = &nic->qs->rq[rq_idx]; | |
1346 | rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); | |
1347 | rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); | |
1348 | } | |
1349 | ||
1350 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) | |
1351 | { | |
1352 | struct snd_queue *sq; | |
1353 | ||
1354 | #define GET_SQ_STATS(reg) \ | |
1355 | nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ | |
1356 | (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) | |
1357 | ||
1358 | sq = &nic->qs->sq[sq_idx]; | |
1359 | sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); | |
1360 | sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); | |
1361 | } | |
1362 | ||
1363 | /* Check for errors in the receive completion queue entry */ | |
1364 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, | |
1365 | struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) | |
1366 | { | |
1367 | struct cmp_queue_stats *stats = &cq->stats; | |
1368 | ||
1369 | if (!cqe_rx->err_level && !cqe_rx->err_opcode) { | |
1370 | stats->rx.errop.good++; | |
1371 | return 0; | |
1372 | } | |
1373 | ||
1374 | if (netif_msg_rx_err(nic)) | |
1375 | netdev_err(nic->netdev, | |
1376 | "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", | |
1377 | nic->netdev->name, | |
1378 | cqe_rx->err_level, cqe_rx->err_opcode); | |
1379 | ||
1380 | switch (cqe_rx->err_level) { | |
1381 | case CQ_ERRLVL_MAC: | |
1382 | stats->rx.errlvl.mac_errs++; | |
1383 | break; | |
1384 | case CQ_ERRLVL_L2: | |
1385 | stats->rx.errlvl.l2_errs++; | |
1386 | break; | |
1387 | case CQ_ERRLVL_L3: | |
1388 | stats->rx.errlvl.l3_errs++; | |
1389 | break; | |
1390 | case CQ_ERRLVL_L4: | |
1391 | stats->rx.errlvl.l4_errs++; | |
1392 | break; | |
1393 | } | |
1394 | ||
1395 | switch (cqe_rx->err_opcode) { | |
1396 | case CQ_RX_ERROP_RE_PARTIAL: | |
1397 | stats->rx.errop.partial_pkts++; | |
1398 | break; | |
1399 | case CQ_RX_ERROP_RE_JABBER: | |
1400 | stats->rx.errop.jabber_errs++; | |
1401 | break; | |
1402 | case CQ_RX_ERROP_RE_FCS: | |
1403 | stats->rx.errop.fcs_errs++; | |
1404 | break; | |
1405 | case CQ_RX_ERROP_RE_TERMINATE: | |
1406 | stats->rx.errop.terminate_errs++; | |
1407 | break; | |
1408 | case CQ_RX_ERROP_RE_RX_CTL: | |
1409 | stats->rx.errop.bgx_rx_errs++; | |
1410 | break; | |
1411 | case CQ_RX_ERROP_PREL2_ERR: | |
1412 | stats->rx.errop.prel2_errs++; | |
1413 | break; | |
1414 | case CQ_RX_ERROP_L2_FRAGMENT: | |
1415 | stats->rx.errop.l2_frags++; | |
1416 | break; | |
1417 | case CQ_RX_ERROP_L2_OVERRUN: | |
1418 | stats->rx.errop.l2_overruns++; | |
1419 | break; | |
1420 | case CQ_RX_ERROP_L2_PFCS: | |
1421 | stats->rx.errop.l2_pfcs++; | |
1422 | break; | |
1423 | case CQ_RX_ERROP_L2_PUNY: | |
1424 | stats->rx.errop.l2_puny++; | |
1425 | break; | |
1426 | case CQ_RX_ERROP_L2_MAL: | |
1427 | stats->rx.errop.l2_hdr_malformed++; | |
1428 | break; | |
1429 | case CQ_RX_ERROP_L2_OVERSIZE: | |
1430 | stats->rx.errop.l2_oversize++; | |
1431 | break; | |
1432 | case CQ_RX_ERROP_L2_UNDERSIZE: | |
1433 | stats->rx.errop.l2_undersize++; | |
1434 | break; | |
1435 | case CQ_RX_ERROP_L2_LENMISM: | |
1436 | stats->rx.errop.l2_len_mismatch++; | |
1437 | break; | |
1438 | case CQ_RX_ERROP_L2_PCLP: | |
1439 | stats->rx.errop.l2_pclp++; | |
1440 | break; | |
1441 | case CQ_RX_ERROP_IP_NOT: | |
1442 | stats->rx.errop.non_ip++; | |
1443 | break; | |
1444 | case CQ_RX_ERROP_IP_CSUM_ERR: | |
1445 | stats->rx.errop.ip_csum_err++; | |
1446 | break; | |
1447 | case CQ_RX_ERROP_IP_MAL: | |
1448 | stats->rx.errop.ip_hdr_malformed++; | |
1449 | break; | |
1450 | case CQ_RX_ERROP_IP_MALD: | |
1451 | stats->rx.errop.ip_payload_malformed++; | |
1452 | break; | |
1453 | case CQ_RX_ERROP_IP_HOP: | |
1454 | stats->rx.errop.ip_hop_errs++; | |
1455 | break; | |
1456 | case CQ_RX_ERROP_L3_ICRC: | |
1457 | stats->rx.errop.l3_icrc_errs++; | |
1458 | break; | |
1459 | case CQ_RX_ERROP_L3_PCLP: | |
1460 | stats->rx.errop.l3_pclp++; | |
1461 | break; | |
1462 | case CQ_RX_ERROP_L4_MAL: | |
1463 | stats->rx.errop.l4_malformed++; | |
1464 | break; | |
1465 | case CQ_RX_ERROP_L4_CHK: | |
1466 | stats->rx.errop.l4_csum_errs++; | |
1467 | break; | |
1468 | case CQ_RX_ERROP_UDP_LEN: | |
1469 | stats->rx.errop.udp_len_err++; | |
1470 | break; | |
1471 | case CQ_RX_ERROP_L4_PORT: | |
1472 | stats->rx.errop.bad_l4_port++; | |
1473 | break; | |
1474 | case CQ_RX_ERROP_TCP_FLAG: | |
1475 | stats->rx.errop.bad_tcp_flag++; | |
1476 | break; | |
1477 | case CQ_RX_ERROP_TCP_OFFSET: | |
1478 | stats->rx.errop.tcp_offset_errs++; | |
1479 | break; | |
1480 | case CQ_RX_ERROP_L4_PCLP: | |
1481 | stats->rx.errop.l4_pclp++; | |
1482 | break; | |
1483 | case CQ_RX_ERROP_RBDR_TRUNC: | |
1484 | stats->rx.errop.pkt_truncated++; | |
1485 | break; | |
1486 | } | |
1487 | ||
1488 | return 1; | |
1489 | } | |
1490 | ||
1491 | /* Check for errors in the send completion queue entry */ | |
1492 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, | |
1493 | struct cmp_queue *cq, struct cqe_send_t *cqe_tx) | |
1494 | { | |
1495 | struct cmp_queue_stats *stats = &cq->stats; | |
1496 | ||
1497 | switch (cqe_tx->send_status) { | |
1498 | case CQ_TX_ERROP_GOOD: | |
1499 | stats->tx.good++; | |
1500 | return 0; | |
1501 | case CQ_TX_ERROP_DESC_FAULT: | |
1502 | stats->tx.desc_fault++; | |
1503 | break; | |
1504 | case CQ_TX_ERROP_HDR_CONS_ERR: | |
1505 | stats->tx.hdr_cons_err++; | |
1506 | break; | |
1507 | case CQ_TX_ERROP_SUBDC_ERR: | |
1508 | stats->tx.subdesc_err++; | |
1509 | break; | |
1510 | case CQ_TX_ERROP_IMM_SIZE_OFLOW: | |
1511 | stats->tx.imm_size_oflow++; | |
1512 | break; | |
1513 | case CQ_TX_ERROP_DATA_SEQUENCE_ERR: | |
1514 | stats->tx.data_seq_err++; | |
1515 | break; | |
1516 | case CQ_TX_ERROP_MEM_SEQUENCE_ERR: | |
1517 | stats->tx.mem_seq_err++; | |
1518 | break; | |
1519 | case CQ_TX_ERROP_LOCK_VIOL: | |
1520 | stats->tx.lock_viol++; | |
1521 | break; | |
1522 | case CQ_TX_ERROP_DATA_FAULT: | |
1523 | stats->tx.data_fault++; | |
1524 | break; | |
1525 | case CQ_TX_ERROP_TSTMP_CONFLICT: | |
1526 | stats->tx.tstmp_conflict++; | |
1527 | break; | |
1528 | case CQ_TX_ERROP_TSTMP_TIMEOUT: | |
1529 | stats->tx.tstmp_timeout++; | |
1530 | break; | |
1531 | case CQ_TX_ERROP_MEM_FAULT: | |
1532 | stats->tx.mem_fault++; | |
1533 | break; | |
1534 | case CQ_TX_ERROP_CK_OVERLAP: | |
1535 | stats->tx.csum_overlap++; | |
1536 | break; | |
1537 | case CQ_TX_ERROP_CK_OFLOW: | |
1538 | stats->tx.csum_overflow++; | |
1539 | break; | |
1540 | } | |
1541 | ||
1542 | return 1; | |
1543 | } |