/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
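	/* Note: NICVF_ALIGNED_ADDR() is assumed here to round the DMA
	 * address up to the next 'align_bytes' boundary; 'base' applies
	 * the same byte offset to the CPU virtual address so both views
	 * stay in sync.  E.g. with dma = 0x1010 and align_bytes = 512,
	 * phys_base = 0x1200 and base = unalign_base + 0x1f0.
	 */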
	return 0;
}

/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	int order = get_order(buf_len);

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
					   order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev,
				   "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

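	/* The buffer handed back is an offset into a (possibly shared)
	 * page.  Each carve-out holds its own page reference -- either
	 * the get_page() above or the initial reference taken by
	 * alloc_pages() -- which the consumer drops with put_page() once
	 * the buffer is done (see nicvf_rb_ptr_to_skb()).
	 */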
	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);

	return 0;
}

/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	void *data;
	struct sk_buff *skb;

	data = phys_to_virt(rb_ptr);

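	/* build_skb() wraps the existing buffer without copying; the
	 * fragment size passed below (RCV_FRAG_LEN) is assumed to already
	 * reserve the tailroom for the struct skb_shared_info that
	 * build_skb() places at the end of the buffer.
	 */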
	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
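
	/* Fill every descriptor with a freshly carved receive buffer.
	 * Each entry stores the buffer's physical address right-shifted by
	 * NICVF_RCV_BUF_ALIGN; the low bits are zero by construction since
	 * buffers are aligned to that boundary (the reverse shift in
	 * nicvf_free_rbdr() recovers the full address).
	 */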
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Release the page reference of each queued buffer */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		put_page(virt_to_page(phys_to_virt(buf_addr)));
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Release the buffer of the tail descriptor too */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	put_page(virt_to_page(phys_to_virt(buf_addr)));

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with at most ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;
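	/* Example: with rbdr_len = 8192 and qcount = 100 buffers still
	 * queued, 8192 - 100 - 1 = 8091 descriptors get refilled.  One
	 * slot is always left unused, presumably so that a completely
	 * full ring stays distinguishable from an empty one.
	 */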

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

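/* Refill strategy: the RBDR tasklet below first attempts an atomic
 * refill from softirq context; if any allocation fails it falls back to
 * nicvf_rbdr_work(), which retries with GFP_KERNEL in process context
 * and keeps rescheduling itself every 10 ms until the ring is full.
 */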
/* Alloc rcv buffers in non-atomic context for a better chance of success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
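	/* PREFETCH_STATUS packs two 32-bit counters; the ring is taken to
	 * be quiescent once both halves match, i.e. every buffer the HW
	 * prefetched has also been consumed (an assumption about the HW
	 * based on how the register is polled here).
	 */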
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
	u64 rq_cfg;
	int sqs;

	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

	/* Enable/disable stripping of the first VLAN tag */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		rq_cfg |= (1ULL << 25);
	else
		rq_cfg &= ~(1ULL << 25);
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

	/* Configure Secondary Qsets, if any */
	for (sqs = 0; sqs < nic->sqs_count; sqs++)
		if (nic->snicvf[sqs])
			nicvf_queue_reg_write(nic->snicvf[sqs],
					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data are to be loaded into L2 cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
	if (!nic->sqs_mode)
		nicvf_config_vlan_stripping(nic, nic->netdev->features);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	if (!cq->enable)
		return;

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, CMP_QUEUE_TIMER_THRESH);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.sqs_mode = nic->sqs_mode;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;
	mbx.qs.sqs_count = nic->sqs_count;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
		qs_cfg->be = 1;
#endif
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;

	nic->rx_queues = qs->rq_cnt;
	nic->tx_queues = qs->sq_cnt;

	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free descriptor from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}
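
/* SQ ring accounting note: free_cnt is maintained with atomics so the
 * xmit path (which consumes entries at tail) and CQE cleanup (which
 * returns entries at head) can run concurrently; head and tail wrap
 * with a power-of-two mask, which assumes q_len is a power of two.
 */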

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate the number of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
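/* Worked example: a linear skb with 3000 payload bytes and
 * gso_size = 1460 gives gso_segs = 3 (1460 + 1460 + 80).  Each segment
 * needs one edesc for its rebuilt header plus one per payload piece,
 * so the loop counts 3 x 2 = 6, and '+ gso_segs' below adds the three
 * SQ_HDR_SUBDESCs, for 9 subdescriptors in total.
 */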
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;	/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;		/* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
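/* This is software TSO: tso_start()/tso_build_hdr() (net/core/tso.c)
 * rebuild the protocol headers for every segment into the preallocated
 * sq->tso_hdrs area, so each segment is posted as
 * HDR + GATHER(header) + GATHER(payload pieces) subdescriptors.
 */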
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUBDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
	nic->drv_stats.tx_tso++;
	return 1;
}

/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
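	/* Queue indices beyond the primary Qset's MAX_SND_QUEUES_PER_QS
	 * are serviced by secondary Qsets (snicvf[]); redirect to that
	 * Qset and renumber the queue relative to it.
	 */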
	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
		/* Get secondary Qset's SQ structure */
		i = sq_num / MAX_SND_QUEUES_PER_QS;
		if (!nic->snicvf[i - 1]) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    i - 1);
			return 1;
		}
		nic = (struct nicvf *)nic->snicvf[i - 1];
		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
	}

	qs = nic->qs;
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

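/* The CQE's receive-buffer length array is read as u16 entries packed
 * into u64 words; on big-endian the u16s within each u64 appear in
 * reverse order, so frag_num() maps a logical fragment index onto its
 * position within the 4-entry group.
 */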
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
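
	/* rb_lens sits at the 4th 64-bit word of the CQE and the buffer
	 * pointers start at the 7th; the hard-coded offsets are assumed
	 * to match the struct cqe_rx_t layout in q_struct.h.
	 */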

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}

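/* NIC_VF_ENA_W1S / NIC_VF_ENA_W1C follow the usual write-1-to-set /
 * write-1-to-clear convention, so disable and clear below only write
 * the mask once, with no read-modify-write needed.
 */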
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}

	nicvf_reg_write(nic, NIC_VF_INT, mask);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	/* If interrupt type is unknown, treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}

	return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}

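/* Per-queue stats registers below are addressed by OR-ing the queue
 * index (shifted by NIC_Q_NUM_SHIFT) and the stat register number
 * (times 8, one 64-bit register per stat) into the base offset.
 */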
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		drv_stats->rx_frames_ok++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx_bgx_truncated_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx_jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx_fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx_bgx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx_prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx_l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx_l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx_l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx_ip_ver_errs++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx_ip_csum_errs++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx_ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx_ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx_ip_ttl_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx_l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx_l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx_l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx_udp_len_errs++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx_l4_port_errs++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx_tcp_flag_errs++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx_tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx_l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx_truncated_pkts++;
		break;
	}

	return 1;
}

/* Check for errors in the send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
		break;
	}

	return 1;
}