vmxnet3: do not stop tx queues after netif_device_detach()
[linux-block.git] / drivers / net / vmxnet3 / vmxnet3_drv.c
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
69dbef0d 4 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
190af10f 23 * Maintained by: pv-drivers@vmware.com
24 *
25 */
26
9d9779e7 27#include <linux/module.h>
28#include <net/ip6_checksum.h>
29
30#include "vmxnet3_int.h"
31
32char vmxnet3_driver_name[] = "vmxnet3";
33#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
34
35/*
36 * PCI Device ID Table
37 * Last entry must be all 0s
38 */
9baa3c34 39static const struct pci_device_id vmxnet3_pciid_table[] = {
40 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
41 {0}
42};
43
44MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
45
09c5088e 46static int enable_mq = 1;
d1a890fa 47
48static void
49vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
50
51/*
52 * Enable/Disable the given intr
53 */
54static void
55vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
56{
57 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
58}
59
60
61static void
62vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
63{
64 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
65}
66
67
68/*
69 * Enable/Disable all intrs used by the device
70 */
71static void
72vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
73{
74 int i;
75
76 for (i = 0; i < adapter->intr.num_intrs; i++)
77 vmxnet3_enable_intr(adapter, i);
78 adapter->shared->devRead.intrConf.intrCtrl &=
79 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
80}
81
82
83static void
84vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
85{
86 int i;
87
88 adapter->shared->devRead.intrConf.intrCtrl |=
89 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
90 for (i = 0; i < adapter->intr.num_intrs; i++)
91 vmxnet3_disable_intr(adapter, i);
92}
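/*
 * Summary of the two helpers above: intrCtrl in the shared area acts as a
 * device-wide gate, while the per-vector IMR registers mask individual
 * vectors.  Enabling unmasks every vector before clearing
 * VMXNET3_IC_DISABLE_ALL; disabling sets VMXNET3_IC_DISABLE_ALL before
 * masking the vectors, so the global gate stays closed while the
 * per-vector masks are being changed.
 */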
93
94
95static void
96vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
97{
98 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
99}
100
101
102static bool
103vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
104{
09c5088e 105 return tq->stopped;
106}
107
108
109static void
110vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
111{
112 tq->stopped = false;
09c5088e 113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
114}
115
116
117static void
118vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
119{
120 tq->stopped = false;
09c5088e 121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
122}
123
124
125static void
126vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
127{
128 tq->stopped = true;
129 tq->num_stop++;
09c5088e 130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
131}
132
133
134/*
135 * Check the link state. This may start or stop the tx queue.
136 */
137static void
4a1745fc 138vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
139{
140 u32 ret;
09c5088e 141 int i;
83d0feff 142 unsigned long flags;
d1a890fa 143
83d0feff 144 spin_lock_irqsave(&adapter->cmd_lock, flags);
145 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
146 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
147 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
148
149 adapter->link_speed = ret >> 16;
150 if (ret & 1) { /* Link is up. */
151 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
152 adapter->link_speed);
6cdd20c3 153 netif_carrier_on(adapter->netdev);
d1a890fa 154
155 if (affectTxQueue) {
156 for (i = 0; i < adapter->num_tx_queues; i++)
157 vmxnet3_tq_start(&adapter->tx_queue[i],
158 adapter);
159 }
d1a890fa 160 } else {
204a6e65 161 netdev_info(adapter->netdev, "NIC Link is Down\n");
6cdd20c3 162 netif_carrier_off(adapter->netdev);
d1a890fa 163
164 if (affectTxQueue) {
165 for (i = 0; i < adapter->num_tx_queues; i++)
166 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
167 }
168 }
169}
170
171static void
172vmxnet3_process_events(struct vmxnet3_adapter *adapter)
173{
09c5088e 174 int i;
e328d410 175 unsigned long flags;
115924b6 176 u32 events = le32_to_cpu(adapter->shared->ecr);
177 if (!events)
178 return;
179
180 vmxnet3_ack_events(adapter, events);
181
182 /* Check if link state has changed */
183 if (events & VMXNET3_ECR_LINK)
4a1745fc 184 vmxnet3_check_link(adapter, true);
185
186 /* Check if there is an error on xmit/recv queues */
187 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
e328d410 188 spin_lock_irqsave(&adapter->cmd_lock, flags);
189 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
190 VMXNET3_CMD_GET_QUEUE_STATUS);
e328d410 191 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa 192
193 for (i = 0; i < adapter->num_tx_queues; i++)
194 if (adapter->tqd_start[i].status.stopped)
195 dev_err(&adapter->netdev->dev,
196 "%s: tq[%d] error 0x%x\n",
197 adapter->netdev->name, i, le32_to_cpu(
198 adapter->tqd_start[i].status.error));
199 for (i = 0; i < adapter->num_rx_queues; i++)
200 if (adapter->rqd_start[i].status.stopped)
201 dev_err(&adapter->netdev->dev,
202 "%s: rq[%d] error 0x%x\n",
203 adapter->netdev->name, i,
204 adapter->rqd_start[i].status.error);
205
206 schedule_work(&adapter->work);
207 }
208}
209
210#ifdef __BIG_ENDIAN_BITFIELD
211/*
212 * The device expects the bitfields in shared structures to be written in
213 * little endian. When the CPU is big endian, the following routines are used
214 * to read from and write to the device ABI correctly.
215 * The general technique used here is: double-word bitfields are defined in
216 * the opposite order for big-endian architectures. Before the driver reads
217 * them, the complete double word is translated using le32_to_cpu. Similarly,
218 * after the driver writes into the bitfields, cpu_to_le32 is used to translate
219 * the double words into the required format.
220 * To avoid touching bits in the shared structure more than once, temporary
221 * descriptors are used. These are passed as srcDesc to the following functions.
222 */
223static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
224 struct Vmxnet3_RxDesc *dstDesc)
225{
226 u32 *src = (u32 *)srcDesc + 2;
227 u32 *dst = (u32 *)dstDesc + 2;
228 dstDesc->addr = le64_to_cpu(srcDesc->addr);
229 *dst = le32_to_cpu(*src);
230 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
231}
232
233static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
234 struct Vmxnet3_TxDesc *dstDesc)
235{
236 int i;
237 u32 *src = (u32 *)(srcDesc + 1);
238 u32 *dst = (u32 *)(dstDesc + 1);
239
240 /* Working backwards so that the gen bit is set at the end. */
241 for (i = 2; i > 0; i--) {
242 src--;
243 dst--;
244 *dst = cpu_to_le32(*src);
245 }
246}
247
248
249static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
250 struct Vmxnet3_RxCompDesc *dstDesc)
251{
252 int i = 0;
253 u32 *src = (u32 *)srcDesc;
254 u32 *dst = (u32 *)dstDesc;
255 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
256 *dst = le32_to_cpu(*src);
257 src++;
258 dst++;
259 }
260}
261
262
263/* Used to read bitfield values from double words. */
264static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
265{
266 u32 temp = le32_to_cpu(*bitfield);
267 u32 mask = ((1 << size) - 1) << pos;
268 temp &= mask;
269 temp >>= pos;
270 return temp;
271}
272
273
274
275#endif /* __BIG_ENDIAN_BITFIELD */
276
277#ifdef __BIG_ENDIAN_BITFIELD
278
279# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
280 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
281 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
282# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
283 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
284 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
285# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
286 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
287 VMXNET3_TCD_GEN_SIZE)
288# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
289 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
290# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
291 (dstrcd) = (tmp); \
292 vmxnet3_RxCompToCPU((rcd), (tmp)); \
293 } while (0)
294# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
295 (dstrxd) = (tmp); \
296 vmxnet3_RxDescToCPU((rxd), (tmp)); \
297 } while (0)
298
299#else
300
301# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
302# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
303# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
304# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
305# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
306# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
307
308#endif /* __BIG_ENDIAN_BITFIELD */
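/*
 * Minimal usage sketch (illustrative only, not part of the original file;
 * handle_completion() below is a hypothetical helper): callers read
 * descriptor bits through these accessors instead of touching the shared
 * structures directly, e.g.
 *
 *	union Vmxnet3_GenericDesc *gd = tq->comp_ring.base +
 *					tq->comp_ring.next2proc;
 *	if (VMXNET3_TCD_GET_GEN(&gd->tcd) == tq->comp_ring.gen)
 *		handle_completion(gd);
 *
 * On little-endian hosts the accessor compiles to a plain bitfield read; on
 * big-endian hosts it goes through get_bitfield32() on the le32 double word.
 */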
309
310
311static void
312vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
313 struct pci_dev *pdev)
314{
315 if (tbi->map_type == VMXNET3_MAP_SINGLE)
b0eb57cb 316 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
bf7bec46 317 DMA_TO_DEVICE);
d1a890fa 318 else if (tbi->map_type == VMXNET3_MAP_PAGE)
b0eb57cb 319 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
bf7bec46 320 DMA_TO_DEVICE);
321 else
322 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
323
324 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
325}
326
327
328static int
329vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
330 struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
331{
332 struct sk_buff *skb;
333 int entries = 0;
334
335 /* no out of order completion */
336 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
115924b6 337 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
338
339 skb = tq->buf_info[eop_idx].skb;
340 BUG_ON(skb == NULL);
341 tq->buf_info[eop_idx].skb = NULL;
342
343 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
344
345 while (tq->tx_ring.next2comp != eop_idx) {
346 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
347 pdev);
348
349 /* update next2comp w/o tx_lock. Since we are marking more,
350 * not fewer, tx ring entries available, the worst case is
351 * that the tx routine incorrectly re-queues a pkt due to
352 * insufficient tx ring entries.
353 */
354 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
355 entries++;
356 }
357
358 dev_kfree_skb_any(skb);
359 return entries;
360}
361
362
363static int
364vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
365 struct vmxnet3_adapter *adapter)
366{
367 int completed = 0;
368 union Vmxnet3_GenericDesc *gdesc;
369
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
115924b6 371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
f3002c13 372 /* Prevent any &gdesc->tcd field from being (speculatively)
373 * read before (&gdesc->tcd)->gen is read.
374 */
375 dma_rmb();
376
377 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
378 &gdesc->tcd), tq, adapter->pdev,
379 adapter);
380
381 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
382 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
383 }
384
385 if (completed) {
386 spin_lock(&tq->tx_lock);
387 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
388 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
389 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
390 netif_carrier_ok(adapter->netdev))) {
391 vmxnet3_tq_wake(tq, adapter);
392 }
393 spin_unlock(&tq->tx_lock);
394 }
395 return completed;
396}
397
398
399static void
400vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
401 struct vmxnet3_adapter *adapter)
402{
403 int i;
404
405 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
406 struct vmxnet3_tx_buf_info *tbi;
407
408 tbi = tq->buf_info + tq->tx_ring.next2comp;
409
410 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
411 if (tbi->skb) {
412 dev_kfree_skb_any(tbi->skb);
413 tbi->skb = NULL;
414 }
415 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
416 }
417
418 /* sanity check, verify all buffers are indeed unmapped and freed */
419 for (i = 0; i < tq->tx_ring.size; i++) {
420 BUG_ON(tq->buf_info[i].skb != NULL ||
421 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
422 }
423
424 tq->tx_ring.gen = VMXNET3_INIT_GEN;
425 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
426
427 tq->comp_ring.gen = VMXNET3_INIT_GEN;
428 tq->comp_ring.next2proc = 0;
429}
430
431
09c5088e 432static void
433vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
434 struct vmxnet3_adapter *adapter)
435{
436 if (tq->tx_ring.base) {
437 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
438 sizeof(struct Vmxnet3_TxDesc),
439 tq->tx_ring.base, tq->tx_ring.basePA);
440 tq->tx_ring.base = NULL;
441 }
442 if (tq->data_ring.base) {
443 dma_free_coherent(&adapter->pdev->dev,
444 tq->data_ring.size * tq->txdata_desc_size,
b0eb57cb 445 tq->data_ring.base, tq->data_ring.basePA);
446 tq->data_ring.base = NULL;
447 }
448 if (tq->comp_ring.base) {
449 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
450 sizeof(struct Vmxnet3_TxCompDesc),
451 tq->comp_ring.base, tq->comp_ring.basePA);
452 tq->comp_ring.base = NULL;
453 }
454 kfree(tq->buf_info);
455 tq->buf_info = NULL;
456}
457
458
459/* Destroy all tx queues */
460void
461vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
462{
463 int i;
464
465 for (i = 0; i < adapter->num_tx_queues; i++)
466 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
467}
468
469
470static void
471vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
472 struct vmxnet3_adapter *adapter)
473{
474 int i;
475
476 /* reset the tx ring contents to 0 and reset the tx ring states */
477 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
478 sizeof(struct Vmxnet3_TxDesc));
479 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480 tq->tx_ring.gen = VMXNET3_INIT_GEN;
481
482 memset(tq->data_ring.base, 0,
483 tq->data_ring.size * tq->txdata_desc_size);
484
485 /* reset the tx comp ring contents to 0 and reset comp ring states */
486 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
487 sizeof(struct Vmxnet3_TxCompDesc));
488 tq->comp_ring.next2proc = 0;
489 tq->comp_ring.gen = VMXNET3_INIT_GEN;
490
491 /* reset the bookkeeping data */
492 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
493 for (i = 0; i < tq->tx_ring.size; i++)
494 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
495
496 /* stats are not reset */
497}
498
499
500static int
501vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
502 struct vmxnet3_adapter *adapter)
503{
504 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
505 tq->comp_ring.base || tq->buf_info);
506
507 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
508 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
509 &tq->tx_ring.basePA, GFP_KERNEL);
d1a890fa 510 if (!tq->tx_ring.base) {
204a6e65 511 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
512 goto err;
513 }
514
b0eb57cb 515 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
3c8b3efc 516 tq->data_ring.size * tq->txdata_desc_size,
b0eb57cb 517 &tq->data_ring.basePA, GFP_KERNEL);
d1a890fa 518 if (!tq->data_ring.base) {
3c8b3efc 519 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
520 goto err;
521 }
522
523 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
524 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
525 &tq->comp_ring.basePA, GFP_KERNEL);
d1a890fa 526 if (!tq->comp_ring.base) {
204a6e65 527 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
528 goto err;
529 }
530
531 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
532 GFP_KERNEL,
533 dev_to_node(&adapter->pdev->dev));
e404decb 534 if (!tq->buf_info)
d1a890fa 535 goto err;
536
537 return 0;
538
539err:
540 vmxnet3_tq_destroy(tq, adapter);
541 return -ENOMEM;
542}
543
544static void
545vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
546{
547 int i;
548
549 for (i = 0; i < adapter->num_tx_queues; i++)
550 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
551}
552
553/*
554 * Starting from ring->next2fill, allocate rx buffers for the given ring
555 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
556 * are allocated or allocation fails.
557 */
558
559static int
560vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
561 int num_to_alloc, struct vmxnet3_adapter *adapter)
562{
563 int num_allocated = 0;
564 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
565 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
566 u32 val;
567
5318d809 568 while (num_allocated <= num_to_alloc) {
569 struct vmxnet3_rx_buf_info *rbi;
570 union Vmxnet3_GenericDesc *gd;
571
572 rbi = rbi_base + ring->next2fill;
573 gd = ring->base + ring->next2fill;
574
575 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
576 if (rbi->skb == NULL) {
577 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
578 rbi->len,
579 GFP_KERNEL);
580 if (unlikely(rbi->skb == NULL)) {
581 rq->stats.rx_buf_alloc_failure++;
582 break;
583 }
d1a890fa 584
585 rbi->dma_addr = dma_map_single(
586 &adapter->pdev->dev,
d1a890fa 587 rbi->skb->data, rbi->len,
bf7bec46 588 DMA_FROM_DEVICE);
589 if (dma_mapping_error(&adapter->pdev->dev,
590 rbi->dma_addr)) {
591 dev_kfree_skb_any(rbi->skb);
592 rq->stats.rx_buf_alloc_failure++;
593 break;
594 }
595 } else {
596 /* rx buffer skipped by the device */
597 }
598 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
599 } else {
600 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
601 rbi->len != PAGE_SIZE);
602
603 if (rbi->page == NULL) {
604 rbi->page = alloc_page(GFP_ATOMIC);
605 if (unlikely(rbi->page == NULL)) {
606 rq->stats.rx_buf_alloc_failure++;
607 break;
608 }
609 rbi->dma_addr = dma_map_page(
610 &adapter->pdev->dev,
d1a890fa 611 rbi->page, 0, PAGE_SIZE,
bf7bec46 612 DMA_FROM_DEVICE);
613 if (dma_mapping_error(&adapter->pdev->dev,
614 rbi->dma_addr)) {
615 put_page(rbi->page);
616 rq->stats.rx_buf_alloc_failure++;
617 break;
618 }
619 } else {
620 /* rx buffers skipped by the device */
621 }
622 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
623 }
624
115924b6 625 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
5318d809 626 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
115924b6 627 | val | rbi->len);
d1a890fa 628
629 /* Fill the last buffer but don't mark it ready, or else the
630 * device will think that the queue is full */
631 if (num_allocated == num_to_alloc)
632 break;
633
634 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
635 num_allocated++;
636 vmxnet3_cmd_ring_adv_next2fill(ring);
637 }
d1a890fa 638
fdcd79b9 639 netdev_dbg(adapter->netdev,
640 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
641 num_allocated, ring->next2fill, ring->next2comp);
642
643 /* so that the device can distinguish a full ring from an empty ring */
644 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
645
646 return num_allocated;
647}
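/*
 * Why the last buffer above is left without its gen bit: the ring is deemed
 * empty when next2fill == next2comp, so if every descriptor were handed to
 * the device a completely full ring would look identical to an empty one
 * (the BUG_ON above guards exactly this).  Holding one descriptor back keeps
 * the two states distinguishable.
 */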
648
649
650static void
651vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
652 struct vmxnet3_rx_buf_info *rbi)
653{
d7840976 654 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
655
656 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
657
0e0634d2 658 __skb_frag_set_page(frag, rbi->page);
b54c9d5b 659 skb_frag_off_set(frag, 0);
660 skb_frag_size_set(frag, rcd->len);
661 skb->data_len += rcd->len;
5e6c355c 662 skb->truesize += PAGE_SIZE;
663 skb_shinfo(skb)->nr_frags++;
664}
665
666
5738a09d 667static int
668vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
669 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
670 struct vmxnet3_adapter *adapter)
671{
672 u32 dw2, len;
673 unsigned long buf_offset;
674 int i;
675 union Vmxnet3_GenericDesc *gdesc;
676 struct vmxnet3_tx_buf_info *tbi = NULL;
677
678 BUG_ON(ctx->copy_size > skb_headlen(skb));
679
680 /* use the previous gen bit for the SOP desc */
681 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
682
683 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
684 gdesc = ctx->sop_txd; /* both loops below can be skipped */
685
686 /* no need to map the buffer if headers are copied */
687 if (ctx->copy_size) {
115924b6 688 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
d1a890fa 689 tq->tx_ring.next2fill *
3c8b3efc 690 tq->txdata_desc_size);
115924b6 691 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
692 ctx->sop_txd->dword[3] = 0;
693
694 tbi = tq->buf_info + tq->tx_ring.next2fill;
695 tbi->map_type = VMXNET3_MAP_NONE;
696
fdcd79b9 697 netdev_dbg(adapter->netdev,
f6965582 698 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
699 tq->tx_ring.next2fill,
700 le64_to_cpu(ctx->sop_txd->txd.addr),
701 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
702 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
703
704 /* use the right gen for non-SOP desc */
705 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
706 }
707
708 /* linear part can use multiple tx desc if it's big */
709 len = skb_headlen(skb) - ctx->copy_size;
710 buf_offset = ctx->copy_size;
711 while (len) {
712 u32 buf_size;
713
714 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
715 buf_size = len;
716 dw2 |= len;
717 } else {
718 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
719 /* spec says that for TxDesc.len, 0 == 2^14 */
720 }
721
722 tbi = tq->buf_info + tq->tx_ring.next2fill;
723 tbi->map_type = VMXNET3_MAP_SINGLE;
b0eb57cb 724 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
d1a890fa 725 skb->data + buf_offset, buf_size,
bf7bec46 726 DMA_TO_DEVICE);
727 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
728 return -EFAULT;
d1a890fa 729
1f4b1612 730 tbi->len = buf_size;
731
732 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
733 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
734
115924b6 735 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
1f4b1612 736 gdesc->dword[2] = cpu_to_le32(dw2);
737 gdesc->dword[3] = 0;
738
fdcd79b9 739 netdev_dbg(adapter->netdev,
f6965582 740 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
741 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
742 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
743 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
744 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
745
746 len -= buf_size;
747 buf_offset += buf_size;
748 }
749
750 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
d7840976 751 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a4d7e485 752 u32 buf_size;
d1a890fa 753
754 buf_offset = 0;
755 len = skb_frag_size(frag);
756 while (len) {
757 tbi = tq->buf_info + tq->tx_ring.next2fill;
758 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
759 buf_size = len;
760 dw2 |= len;
761 } else {
762 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
763 /* spec says that for TxDesc.len, 0 == 2^14 */
764 }
765 tbi->map_type = VMXNET3_MAP_PAGE;
766 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
767 buf_offset, buf_size,
768 DMA_TO_DEVICE);
769 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
770 return -EFAULT;
d1a890fa 771
a4d7e485 772 tbi->len = buf_size;
d1a890fa 773
774 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
775 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
d1a890fa 776
777 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
778 gdesc->dword[2] = cpu_to_le32(dw2);
779 gdesc->dword[3] = 0;
d1a890fa 780
fdcd79b9 781 netdev_dbg(adapter->netdev,
8b429468 782 "txd[%u]: 0x%llx %u %u\n",
783 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
784 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
785 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
786 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
787
788 len -= buf_size;
789 buf_offset += buf_size;
790 }
791 }
792
793 ctx->eop_txd = gdesc;
794
795 /* set the last buf_info for the pkt */
796 tbi->skb = skb;
797 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
798
799 return 0;
800}
801
802
803/* Init all tx queues */
804static void
805vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
806{
807 int i;
808
809 for (i = 0; i < adapter->num_tx_queues; i++)
810 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
811}
812
813
d1a890fa 814/*
cec05562 815 * parse relevant protocol headers:
816 * For a tso pkt, relevant headers are L2/3/4 including options
817 * For a pkt requesting csum offloading, they are L2/3 and may include L4
818 * if it's a TCP/UDP pkt
819 *
820 * Returns:
821 * -1: error happens during parsing
822 * 0: protocol headers parsed, but too big to be copied
823 * 1: protocol headers parsed and copied
824 *
825 * Other effects:
826 * 1. related *ctx fields are updated.
827 * 2. ctx->copy_size is # of bytes copied
cec05562 828 * 3. the portion to be copied is guaranteed to be in the linear part
829 *
830 */
831static int
832vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
833 struct vmxnet3_tx_ctx *ctx,
834 struct vmxnet3_adapter *adapter)
d1a890fa 835{
759c9359 836 u8 protocol = 0;
d1a890fa 837
0d0b1672 838 if (ctx->mss) { /* TSO */
839 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
840 ctx->l4_offset = skb_inner_transport_offset(skb);
841 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
842 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
843 } else {
844 ctx->l4_offset = skb_transport_offset(skb);
845 ctx->l4_hdr_size = tcp_hdrlen(skb);
846 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
847 }
d1a890fa 848 } else {
d1a890fa 849 if (skb->ip_summed == CHECKSUM_PARTIAL) {
850 /* For encap packets, skb_checksum_start_offset refers
851 * to inner L4 offset. Thus, below works for encap as
852 * well as non-encap case
853 */
854 ctx->l4_offset = skb_checksum_start_offset(skb);
d1a890fa 855
856 if (VMXNET3_VERSION_GE_4(adapter) &&
857 skb->encapsulation) {
858 struct iphdr *iph = inner_ip_hdr(skb);
8bca5d1e 859
860 if (iph->version == 4) {
861 protocol = iph->protocol;
862 } else {
863 const struct ipv6hdr *ipv6h;
759c9359 864
865 ipv6h = inner_ipv6_hdr(skb);
866 protocol = ipv6h->nexthdr;
867 }
868 } else {
869 if (ctx->ipv4) {
870 const struct iphdr *iph = ip_hdr(skb);
871
872 protocol = iph->protocol;
873 } else if (ctx->ipv6) {
874 const struct ipv6hdr *ipv6h;
875
876 ipv6h = ipv6_hdr(skb);
877 protocol = ipv6h->nexthdr;
878 }
879 }
880
881 switch (protocol) {
882 case IPPROTO_TCP:
883 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
884 tcp_hdrlen(skb);
885 break;
886 case IPPROTO_UDP:
887 ctx->l4_hdr_size = sizeof(struct udphdr);
888 break;
889 default:
d1a890fa 890 ctx->l4_hdr_size = 0;
759c9359 891 break;
d1a890fa 892 }
759c9359 893
dacce2be 894 ctx->copy_size = min(ctx->l4_offset +
b203262d 895 ctx->l4_hdr_size, skb->len);
d1a890fa 896 } else {
dacce2be 897 ctx->l4_offset = 0;
898 ctx->l4_hdr_size = 0;
899 /* copy as much as allowed */
900 ctx->copy_size = min_t(unsigned int,
901 tq->txdata_desc_size,
902 skb_headlen(skb));
903 }
904
905 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
906 ctx->copy_size = skb->len;
907
908 /* make sure headers are accessible directly */
909 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
910 goto err;
911 }
912
3c8b3efc 913 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
914 tq->stats.oversized_hdr++;
915 ctx->copy_size = 0;
916 return 0;
917 }
918
919 return 1;
920err:
921 return -1;
922}
923
924/*
925 * copy relevant protocol headers to the transmit ring:
926 * For a tso pkt, relevant headers are L2/3/4 including options
927 * For a pkt requesting csum offloading, they are L2/3 and may include L4
928 * if it's a TCP/UDP pkt
929 *
930 *
931 * Note that this requires that vmxnet3_parse_hdr be called first to set the
932 * appropriate bits in ctx
933 */
934static void
935vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
936 struct vmxnet3_tx_ctx *ctx,
937 struct vmxnet3_adapter *adapter)
938{
939 struct Vmxnet3_TxDataDesc *tdd;
940
941 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
942 tq->tx_ring.next2fill *
943 tq->txdata_desc_size);
944
945 memcpy(tdd->data, skb->data, ctx->copy_size);
fdcd79b9 946 netdev_dbg(adapter->netdev,
f6965582 947 "copy %u bytes to dataRing[%u]\n",
d1a890fa 948 ctx->copy_size, tq->tx_ring.next2fill);
949}
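/*
 * vmxnet3_parse_hdr() and vmxnet3_copy_hdr() work as a pair: the parser
 * decides how many header bytes (ctx->copy_size, bounded by
 * tq->txdata_desc_size) should travel through the per-queue data ring, and
 * the copy above places exactly those bytes into the slot that the next
 * SOP descriptor will point at, so the device reads the headers from the
 * coherent data ring rather than from a mapping of the skb head.
 */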
950
951
952static void
953vmxnet3_prepare_inner_tso(struct sk_buff *skb,
954 struct vmxnet3_tx_ctx *ctx)
955{
956 struct tcphdr *tcph = inner_tcp_hdr(skb);
957 struct iphdr *iph = inner_ip_hdr(skb);
958
36432797 959 if (iph->version == 4) {
960 iph->check = 0;
961 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
962 IPPROTO_TCP, 0);
36432797 963 } else {
964 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
965
966 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
967 IPPROTO_TCP, 0);
968 }
969}
970
971static void
972vmxnet3_prepare_tso(struct sk_buff *skb,
973 struct vmxnet3_tx_ctx *ctx)
974{
975 struct tcphdr *tcph = tcp_hdr(skb);
976
d1a890fa 977 if (ctx->ipv4) {
978 struct iphdr *iph = ip_hdr(skb);
979
980 iph->check = 0;
981 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
982 IPPROTO_TCP, 0);
759c9359 983 } else if (ctx->ipv6) {
091c9f82 984 tcp_v6_gso_csum_prep(skb);
985 }
986}
987
988static int txd_estimate(const struct sk_buff *skb)
989{
990 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
991 int i;
992
993 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
d7840976 994 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
995
996 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
997 }
998 return count;
999}
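/*
 * Worked example, assuming VMXNET3_TXD_NEEDED(x) (defined elsewhere in the
 * driver) is roughly DIV_ROUND_UP(x, VMXNET3_MAX_TX_BUF_SIZE) with a 16 KiB
 * maximum buffer size: an skb with an 18000-byte linear part and one
 * 3000-byte frag estimates (2 + 1) + 1 = 4 descriptors, where the extra
 * "+ 1" accounts for the additional SOP descriptor used when headers are
 * copied into the data ring.
 */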
1000
1001/*
1002 * Transmits a pkt thru a given tq
1003 * Returns:
1004 * NETDEV_TX_OK: descriptors are setup successfully
25985edc 1005 * NETDEV_TX_OK: error occurred, the pkt is dropped
1006 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
1007 *
1008 * Side-effects:
1009 * 1. tx ring may be changed
1010 * 2. tq stats may be updated accordingly
1011 * 3. shared->txNumDeferred may be updated
1012 */
1013
1014static int
1015vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1016 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1017{
1018 int ret;
1019 u32 count;
1020 int num_pkts;
1021 int tx_num_deferred;
1022 unsigned long flags;
1023 struct vmxnet3_tx_ctx ctx;
1024 union Vmxnet3_GenericDesc *gdesc;
1025#ifdef __BIG_ENDIAN_BITFIELD
1026 /* Use temporary descriptor to avoid touching bits multiple times */
1027 union Vmxnet3_GenericDesc tempTxDesc;
1028#endif
d1a890fa 1029
a4d7e485 1030 count = txd_estimate(skb);
d1a890fa 1031
72e85c45 1032 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
759c9359 1033 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1034
1035 ctx.mss = skb_shinfo(skb)->gso_size;
1036 if (ctx.mss) {
1037 if (skb_header_cloned(skb)) {
1038 if (unlikely(pskb_expand_head(skb, 0, 0,
1039 GFP_ATOMIC) != 0)) {
1040 tq->stats.drop_tso++;
1041 goto drop_pkt;
1042 }
1043 tq->stats.copy_skb_header++;
1044 }
1045 if (skb->encapsulation) {
1046 vmxnet3_prepare_inner_tso(skb, &ctx);
1047 } else {
1048 vmxnet3_prepare_tso(skb, &ctx);
1049 }
1050 } else {
1051 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1052
1053 /* non-tso pkts must not use more than
1054 * VMXNET3_MAX_TXD_PER_PKT entries
1055 */
1056 if (skb_linearize(skb) != 0) {
1057 tq->stats.drop_too_many_frags++;
1058 goto drop_pkt;
1059 }
1060 tq->stats.linearized++;
1061
1062 /* recalculate the # of descriptors to use */
1063 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1064 }
1065 }
1066
cec05562 1067 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1068 if (ret >= 0) {
1069 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1070 /* hdrs parsed, check against other limits */
1071 if (ctx.mss) {
dacce2be 1072 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
d1a890fa 1073 VMXNET3_MAX_TX_BUF_SIZE)) {
1074 tq->stats.drop_oversized_hdr++;
1075 goto drop_pkt;
1076 }
1077 } else {
1078 if (skb->ip_summed == CHECKSUM_PARTIAL) {
dacce2be 1079 if (unlikely(ctx.l4_offset +
1080 skb->csum_offset >
1081 VMXNET3_MAX_CSUM_OFFSET)) {
1082 tq->stats.drop_oversized_hdr++;
1083 goto drop_pkt;
1084 }
1085 }
1086 }
1087 } else {
1088 tq->stats.drop_hdr_inspect_err++;
cec05562 1089 goto drop_pkt;
1090 }
1091
1092 spin_lock_irqsave(&tq->tx_lock, flags);
1093
1094 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1095 tq->stats.tx_ring_full++;
1096 netdev_dbg(adapter->netdev,
1097 "tx queue stopped on %s, next2comp %u"
1098 " next2fill %u\n", adapter->netdev->name,
1099 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1100
1101 vmxnet3_tq_stop(tq, adapter);
1102 spin_unlock_irqrestore(&tq->tx_lock, flags);
1103 return NETDEV_TX_BUSY;
1104 }
1105
1106
1107 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1108
d1a890fa 1109 /* fill tx descs related to addr & len */
1110 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1111 goto unlock_drop_pkt;
1112
1113 /* setup the EOP desc */
115924b6 1114 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1115
1116 /* setup the SOP desc */
1117#ifdef __BIG_ENDIAN_BITFIELD
1118 gdesc = &tempTxDesc;
1119 gdesc->dword[2] = ctx.sop_txd->dword[2];
1120 gdesc->dword[3] = ctx.sop_txd->dword[3];
1121#else
d1a890fa 1122 gdesc = ctx.sop_txd;
115924b6 1123#endif
7a4c003d 1124 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
d1a890fa 1125 if (ctx.mss) {
1126 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1127 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1128 gdesc->txd.om = VMXNET3_OM_ENCAP;
1129 gdesc->txd.msscof = ctx.mss;
1130
1dac3b1b 1131 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1132 gdesc->txd.oco = 1;
1133 } else {
1134 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1135 gdesc->txd.om = VMXNET3_OM_TSO;
1136 gdesc->txd.msscof = ctx.mss;
1137 }
7a4c003d 1138 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1139 } else {
1140 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1141 if (VMXNET3_VERSION_GE_4(adapter) &&
1142 skb->encapsulation) {
1143 gdesc->txd.hlen = ctx.l4_offset +
1144 ctx.l4_hdr_size;
1145 gdesc->txd.om = VMXNET3_OM_ENCAP;
1146 gdesc->txd.msscof = 0; /* Reserved */
1147 } else {
1148 gdesc->txd.hlen = ctx.l4_offset;
1149 gdesc->txd.om = VMXNET3_OM_CSUM;
1150 gdesc->txd.msscof = ctx.l4_offset +
1151 skb->csum_offset;
1152 }
1153 } else {
1154 gdesc->txd.om = 0;
1155 gdesc->txd.msscof = 0;
1156 }
7a4c003d 1157 num_pkts = 1;
d1a890fa 1158 }
1159 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1160 tx_num_deferred += num_pkts;
d1a890fa 1161
df8a39de 1162 if (skb_vlan_tag_present(skb)) {
d1a890fa 1163 gdesc->txd.ti = 1;
df8a39de 1164 gdesc->txd.tci = skb_vlan_tag_get(skb);
1165 }
1166
f3002c13 1167 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1168 * all other writes to &gdesc->txd.
1169 */
1170 dma_wmb();
1171
1172 /* finally flip the GEN bit of the SOP desc. */
1173 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1174 VMXNET3_TXD_GEN);
1175#ifdef __BIG_ENDIAN_BITFIELD
1176 /* Finished updating in bitfields of Tx Desc, so write them in original
1177 * place.
1178 */
1179 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1180 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1181 gdesc = ctx.sop_txd;
1182#endif
fdcd79b9 1183 netdev_dbg(adapter->netdev,
f6965582 1184 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
c2fd03a0 1185 (u32)(ctx.sop_txd -
1186 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1187 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1188
1189 spin_unlock_irqrestore(&tq->tx_lock, flags);
1190
7a4c003d 1191 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
d1a890fa 1192 tq->shared->txNumDeferred = 0;
1193 VMXNET3_WRITE_BAR0_REG(adapter,
1194 VMXNET3_REG_TXPROD + tq->qid * 8,
1195 tq->tx_ring.next2fill);
1196 }
1197
1198 return NETDEV_TX_OK;
1199
1200unlock_drop_pkt:
1201 spin_unlock_irqrestore(&tq->tx_lock, flags);
1202drop_pkt:
1203 tq->stats.drop_total++;
b1b71817 1204 dev_kfree_skb_any(skb);
1205 return NETDEV_TX_OK;
1206}
1207
1208
1209static netdev_tx_t
1210vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1211{
1212 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa 1213
96800ee7 1214 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1215 return vmxnet3_tq_xmit(skb,
1216 &adapter->tx_queue[skb->queue_mapping],
1217 adapter, netdev);
1218}
1219
1220
1221static void
1222vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1223 struct sk_buff *skb,
1224 union Vmxnet3_GenericDesc *gdesc)
1225{
a0d2730c 1226 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1227 if (gdesc->rcd.v4 &&
1228 (le32_to_cpu(gdesc->dword[3]) &
1229 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1230 skb->ip_summed = CHECKSUM_UNNECESSARY;
1231 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1232 !(le32_to_cpu(gdesc->dword[0]) &
1233 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1234 WARN_ON_ONCE(gdesc->rcd.frg &&
1235 !(le32_to_cpu(gdesc->dword[0]) &
1236 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1237 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1238 (1 << VMXNET3_RCD_TUC_SHIFT))) {
d1a890fa 1239 skb->ip_summed = CHECKSUM_UNNECESSARY;
1240 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1241 !(le32_to_cpu(gdesc->dword[0]) &
1242 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1243 WARN_ON_ONCE(gdesc->rcd.frg &&
1244 !(le32_to_cpu(gdesc->dword[0]) &
1245 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1246 } else {
1247 if (gdesc->rcd.csum) {
1248 skb->csum = htons(gdesc->rcd.csum);
1249 skb->ip_summed = CHECKSUM_PARTIAL;
1250 } else {
bc8acf2c 1251 skb_checksum_none_assert(skb);
1252 }
1253 }
1254 } else {
bc8acf2c 1255 skb_checksum_none_assert(skb);
1256 }
1257}
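/*
 * Net effect of the checks above: descriptors whose checksum-OK bits are set
 * for an IPv4 or IPv6 TCP/UDP frame yield CHECKSUM_UNNECESSARY; when the
 * device instead returns a raw checksum value it is stored in skb->csum with
 * ip_summed set to CHECKSUM_PARTIAL; in every other case (including RX
 * checksum offload being disabled) the skb is left at CHECKSUM_NONE via
 * skb_checksum_none_assert().
 */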
1258
1259
1260static void
1261vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1262 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1263{
1264 rq->stats.drop_err++;
1265 if (!rcd->fcs)
1266 rq->stats.drop_fcs++;
1267
1268 rq->stats.drop_total++;
1269
1270 /*
1271 * We do not unmap and chain the rx buffer to the skb.
1272 * We basically pretend this buffer is not used and will be recycled
1273 * by vmxnet3_rq_alloc_rx_buf()
1274 */
1275
1276 /*
1277 * ctx->skb may be NULL if this is the first and the only one
1278 * desc for the pkt
1279 */
1280 if (ctx->skb)
1281 dev_kfree_skb_irq(ctx->skb);
1282
1283 ctx->skb = NULL;
1284}
1285
1286
1287static u32
1288vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1289 union Vmxnet3_GenericDesc *gdesc)
1290{
1291 u32 hlen, maplen;
1292 union {
1293 void *ptr;
1294 struct ethhdr *eth;
65ec0bd1 1295 struct vlan_ethhdr *veth;
1296 struct iphdr *ipv4;
1297 struct ipv6hdr *ipv6;
1298 struct tcphdr *tcp;
1299 } hdr;
1300 BUG_ON(gdesc->rcd.tcp == 0);
1301
1302 maplen = skb_headlen(skb);
1303 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1304 return 0;
1305
1306 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1307 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1308 hlen = sizeof(struct vlan_ethhdr);
1309 else
1310 hlen = sizeof(struct ethhdr);
1311
1312 hdr.eth = eth_hdr(skb);
1313 if (gdesc->rcd.v4) {
1314 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1315 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1316 hdr.ptr += hlen;
1317 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1318 hlen = hdr.ipv4->ihl << 2;
1319 hdr.ptr += hdr.ipv4->ihl << 2;
1320 } else if (gdesc->rcd.v6) {
1321 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1322 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1323 hdr.ptr += hlen;
1324 /* Use an estimated value, since we also need to handle
1325 * TSO case.
1326 */
1327 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1328 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1329 hlen = sizeof(struct ipv6hdr);
1330 hdr.ptr += sizeof(struct ipv6hdr);
1331 } else {
1332 /* Non-IP pkt, don't estimate header length */
1333 return 0;
1334 }
1335
1336 if (hlen + sizeof(struct tcphdr) > maplen)
1337 return 0;
1338
1339 return (hlen + (hdr.tcp->doff << 2));
1340}
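/*
 * The length returned above feeds the LRO handling in
 * vmxnet3_rq_rx_complete(): when the completion descriptor carries no MSS,
 * gso_size is reconstructed as DIV_ROUND_UP(skb->len - hlen, segCnt), or as
 * mtu - hlen when segCnt is unknown, so the estimate only has to be close
 * enough for that division.
 */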
1341
1342static int
1343vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1344 struct vmxnet3_adapter *adapter, int quota)
1345{
1346 static const u32 rxprod_reg[2] = {
1347 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1348 };
0769636c 1349 u32 num_pkts = 0;
5318d809 1350 bool skip_page_frags = false;
1351 struct Vmxnet3_RxCompDesc *rcd;
1352 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
45dac1d6 1353 u16 segCnt = 0, mss = 0;
1354#ifdef __BIG_ENDIAN_BITFIELD
1355 struct Vmxnet3_RxDesc rxCmdDesc;
1356 struct Vmxnet3_RxCompDesc rxComp;
1357#endif
1358 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1359 &rxComp);
1360 while (rcd->gen == rq->comp_ring.gen) {
1361 struct vmxnet3_rx_buf_info *rbi;
1362 struct sk_buff *skb, *new_skb = NULL;
1363 struct page *new_page = NULL;
5738a09d 1364 dma_addr_t new_dma_addr;
1365 int num_to_alloc;
1366 struct Vmxnet3_RxDesc *rxd;
1367 u32 idx, ring_idx;
5318d809 1368 struct vmxnet3_cmd_ring *ring = NULL;
0769636c 1369 if (num_pkts >= quota) {
1370 /* we may stop even before we see the EOP desc of
1371 * the current pkt
1372 */
1373 break;
1374 }
f3002c13 1375
1376 /* Prevent any rcd field from being (speculatively) read before
1377 * rcd->gen is read.
1378 */
1379 dma_rmb();
1380
1381 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1382 rcd->rqID != rq->dataRingQid);
d1a890fa 1383 idx = rcd->rxdIdx;
50a5ce3e 1384 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
5318d809 1385 ring = rq->rx_ring + ring_idx;
1386 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1387 &rxCmdDesc);
1388 rbi = rq->buf_info[ring_idx] + idx;
1389
1390 BUG_ON(rxd->addr != rbi->dma_addr ||
1391 rxd->len != rbi->len);
1392
1393 if (unlikely(rcd->eop && rcd->err)) {
1394 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1395 goto rcd_done;
1396 }
1397
1398 if (rcd->sop) { /* first buf of the pkt */
1399 bool rxDataRingUsed;
1400 u16 len;
1401
d1a890fa 1402 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1403 (rcd->rqID != rq->qid &&
1404 rcd->rqID != rq->dataRingQid));
1405
1406 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
1407 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1408
1409 if (unlikely(rcd->len == 0)) {
1410 /* Pretend the rx buffer is skipped. */
1411 BUG_ON(!(rcd->sop && rcd->eop));
fdcd79b9 1412 netdev_dbg(adapter->netdev,
f6965582 1413 "rxRing[%u][%u] 0 length\n",
1414 ring_idx, idx);
1415 goto rcd_done;
1416 }
1417
5318d809 1418 skip_page_frags = false;
d1a890fa 1419 ctx->skb = rbi->skb;
1420
1421 rxDataRingUsed =
1422 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1423 len = rxDataRingUsed ? rcd->len : rbi->len;
0d735f13 1424 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
50a5ce3e 1425 len);
1426 if (new_skb == NULL) {
1427 /* Skb allocation failed, do not handover this
1428 * skb to stack. Reuse it. Drop the existing pkt
1429 */
1430 rq->stats.rx_buf_alloc_failure++;
1431 ctx->skb = NULL;
1432 rq->stats.drop_total++;
1433 skip_page_frags = true;
1434 goto rcd_done;
1435 }
d1a890fa 1436
1437 if (rxDataRingUsed) {
1438 size_t sz;
1439
1440 BUG_ON(rcd->len > rq->data_ring.desc_size);
1441
1442 ctx->skb = new_skb;
1443 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1444 memcpy(new_skb->data,
1445 &rq->data_ring.base[sz], rcd->len);
1446 } else {
1447 ctx->skb = rbi->skb;
1448
1449 new_dma_addr =
1450 dma_map_single(&adapter->pdev->dev,
1451 new_skb->data, rbi->len,
bf7bec46 1452 DMA_FROM_DEVICE);
1453 if (dma_mapping_error(&adapter->pdev->dev,
1454 new_dma_addr)) {
1455 dev_kfree_skb(new_skb);
1456 /* Skb allocation failed, do not
1457 * handover this skb to stack. Reuse
1458 * it. Drop the existing pkt.
1459 */
1460 rq->stats.rx_buf_alloc_failure++;
1461 ctx->skb = NULL;
1462 rq->stats.drop_total++;
1463 skip_page_frags = true;
1464 goto rcd_done;
1465 }
1466
1467 dma_unmap_single(&adapter->pdev->dev,
1468 rbi->dma_addr,
1469 rbi->len,
bf7bec46 1470 DMA_FROM_DEVICE);
1471
1472 /* Immediate refill */
1473 rbi->skb = new_skb;
1474 rbi->dma_addr = new_dma_addr;
1475 rxd->addr = cpu_to_le64(rbi->dma_addr);
1476 rxd->len = rbi->len;
1477 }
d1a890fa 1478
1479#ifdef VMXNET3_RSS
1480 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1481 (adapter->netdev->features & NETIF_F_RXHASH)) {
1482 enum pkt_hash_types hash_type;
1483
1484 switch (rcd->rssType) {
1485 case VMXNET3_RCD_RSS_TYPE_IPV4:
1486 case VMXNET3_RCD_RSS_TYPE_IPV6:
1487 hash_type = PKT_HASH_TYPE_L3;
1488 break;
1489 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1490 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1491 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1492 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1493 hash_type = PKT_HASH_TYPE_L4;
1494 break;
1495 default:
1496 hash_type = PKT_HASH_TYPE_L3;
1497 break;
1498 }
1499 skb_set_hash(ctx->skb,
1500 le32_to_cpu(rcd->rssHash),
1501 hash_type);
1502 }
7db11f75 1503#endif
d1a890fa 1504 skb_put(ctx->skb, rcd->len);
5318d809 1505
190af10f 1506 if (VMXNET3_VERSION_GE_2(adapter) &&
1507 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1508 struct Vmxnet3_RxCompDescExt *rcdlro;
1509 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1510
1511 segCnt = rcdlro->segCnt;
50219538 1512 WARN_ON_ONCE(segCnt == 0);
1513 mss = rcdlro->mss;
1514 if (unlikely(segCnt <= 1))
1515 segCnt = 0;
1516 } else {
1517 segCnt = 0;
1518 }
d1a890fa 1519 } else {
1520 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1521
d1a890fa 1522 /* non SOP buffer must be type 1 in most cases */
1523 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1524 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
d1a890fa 1525
1526 /* If an sop buffer was dropped, skip all
1527 * following non-sop fragments. They will be reused.
1528 */
1529 if (skip_page_frags)
1530 goto rcd_done;
d1a890fa 1531
1532 if (rcd->len) {
1533 new_page = alloc_page(GFP_ATOMIC);
1534 /* Replacement page frag could not be allocated.
1535 * Reuse this page. Drop the pkt and free the
1536 * skb which contained this page as a frag. Skip
1537 * processing all the following non-sop frags.
d1a890fa 1538 */
1539 if (unlikely(!new_page)) {
1540 rq->stats.rx_buf_alloc_failure++;
1541 dev_kfree_skb(ctx->skb);
1542 ctx->skb = NULL;
1543 skip_page_frags = true;
1544 goto rcd_done;
1545 }
1546 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1547 new_page,
1548 0, PAGE_SIZE,
bf7bec46 1549 DMA_FROM_DEVICE);
1550 if (dma_mapping_error(&adapter->pdev->dev,
1551 new_dma_addr)) {
1552 put_page(new_page);
1553 rq->stats.rx_buf_alloc_failure++;
1554 dev_kfree_skb(ctx->skb);
1555 ctx->skb = NULL;
1556 skip_page_frags = true;
1557 goto rcd_done;
1558 }
5318d809 1559
b0eb57cb 1560 dma_unmap_page(&adapter->pdev->dev,
5318d809 1561 rbi->dma_addr, rbi->len,
bf7bec46 1562 DMA_FROM_DEVICE);
1563
1564 vmxnet3_append_frag(ctx->skb, rcd, rbi);
5318d809 1565
1566 /* Immediate refill */
1567 rbi->page = new_page;
5738a09d 1568 rbi->dma_addr = new_dma_addr;
1569 rxd->addr = cpu_to_le64(rbi->dma_addr);
1570 rxd->len = rbi->len;
1571 }
1572 }
1573
5318d809 1574
1575 skb = ctx->skb;
1576 if (rcd->eop) {
45dac1d6 1577 u32 mtu = adapter->netdev->mtu;
d1a890fa 1578 skb->len += skb->data_len;
1579
1580 vmxnet3_rx_csum(adapter, skb,
1581 (union Vmxnet3_GenericDesc *)rcd);
1582 skb->protocol = eth_type_trans(skb, adapter->netdev);
1583 if (!rcd->tcp ||
1584 !(adapter->netdev->features & NETIF_F_LRO))
1585 goto not_lro;
1586
1587 if (segCnt != 0 && mss != 0) {
1588 skb_shinfo(skb)->gso_type = rcd->v4 ?
1589 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1590 skb_shinfo(skb)->gso_size = mss;
1591 skb_shinfo(skb)->gso_segs = segCnt;
1592 } else if (segCnt != 0 || skb->len > mtu) {
1593 u32 hlen;
1594
1595 hlen = vmxnet3_get_hdr_len(adapter, skb,
1596 (union Vmxnet3_GenericDesc *)rcd);
1597 if (hlen == 0)
1598 goto not_lro;
1599
1600 skb_shinfo(skb)->gso_type =
1601 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1602 if (segCnt != 0) {
1603 skb_shinfo(skb)->gso_segs = segCnt;
1604 skb_shinfo(skb)->gso_size =
1605 DIV_ROUND_UP(skb->len -
1606 hlen, segCnt);
1607 } else {
1608 skb_shinfo(skb)->gso_size = mtu - hlen;
1609 }
1610 }
1611not_lro:
72e85c45 1612 if (unlikely(rcd->ts))
86a9bad3 1613 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
72e85c45 1614
1615 if (adapter->netdev->features & NETIF_F_LRO)
1616 netif_receive_skb(skb);
1617 else
1618 napi_gro_receive(&rq->napi, skb);
d1a890fa 1619
d1a890fa 1620 ctx->skb = NULL;
0769636c 1621 num_pkts++;
1622 }
1623
1624rcd_done:
1625 /* device may have skipped some rx descs */
1626 ring->next2comp = idx;
1627 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1628 ring = rq->rx_ring + ring_idx;
f3002c13 1629
1630 /* Ensure that the writes to rxd->gen bits will be observed
1631 * after all other writes to rxd objects.
1632 */
1633 dma_wmb();
1634
5318d809
SB
1635 while (num_to_alloc) {
1636 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1637 &rxCmdDesc);
1638 BUG_ON(!rxd->addr);
1639
1640 /* Recv desc is ready to be used by the device */
1641 rxd->gen = ring->gen;
1642 vmxnet3_cmd_ring_adv_next2fill(ring);
1643 num_to_alloc--;
1644 }
1645
1646 /* if needed, update the register */
1647 if (unlikely(rq->shared->updateRxProd)) {
1648 VMXNET3_WRITE_BAR0_REG(adapter,
96800ee7 1649 rxprod_reg[ring_idx] + rq->qid * 8,
1650 ring->next2fill);
1651 }
1652
1653 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
115924b6 1654 vmxnet3_getRxComp(rcd,
96800ee7 1655 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1656 }
1657
0769636c 1658 return num_pkts;
1659}
1660
1661
1662static void
1663vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1664 struct vmxnet3_adapter *adapter)
1665{
1666 u32 i, ring_idx;
1667 struct Vmxnet3_RxDesc *rxd;
1668
1669 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1670 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1671#ifdef __BIG_ENDIAN_BITFIELD
1672 struct Vmxnet3_RxDesc rxDesc;
1673#endif
1674 vmxnet3_getRxDesc(rxd,
1675 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1676
1677 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1678 rq->buf_info[ring_idx][i].skb) {
b0eb57cb 1679 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
bf7bec46 1680 rxd->len, DMA_FROM_DEVICE);
1681 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1682 rq->buf_info[ring_idx][i].skb = NULL;
1683 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1684 rq->buf_info[ring_idx][i].page) {
b0eb57cb 1685 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
bf7bec46 1686 rxd->len, DMA_FROM_DEVICE);
1687 put_page(rq->buf_info[ring_idx][i].page);
1688 rq->buf_info[ring_idx][i].page = NULL;
1689 }
1690 }
1691
1692 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1693 rq->rx_ring[ring_idx].next2fill =
1694 rq->rx_ring[ring_idx].next2comp = 0;
1695 }
1696
1697 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1698 rq->comp_ring.next2proc = 0;
1699}
1700
1701
1702static void
1703vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1704{
1705 int i;
1706
1707 for (i = 0; i < adapter->num_rx_queues; i++)
1708 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1709}
1710
1711
280b74f7 1712static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1713 struct vmxnet3_adapter *adapter)
1714{
1715 int i;
1716 int j;
1717
1718 /* all rx buffers must have already been freed */
1719 for (i = 0; i < 2; i++) {
1720 if (rq->buf_info[i]) {
1721 for (j = 0; j < rq->rx_ring[i].size; j++)
1722 BUG_ON(rq->buf_info[i][j].page != NULL);
1723 }
1724 }
1725
1726
1727 for (i = 0; i < 2; i++) {
1728 if (rq->rx_ring[i].base) {
1729 dma_free_coherent(&adapter->pdev->dev,
1730 rq->rx_ring[i].size
1731 * sizeof(struct Vmxnet3_RxDesc),
1732 rq->rx_ring[i].base,
1733 rq->rx_ring[i].basePA);
1734 rq->rx_ring[i].base = NULL;
1735 }
1736 }
1737
1738 if (rq->data_ring.base) {
1739 dma_free_coherent(&adapter->pdev->dev,
1740 rq->rx_ring[0].size * rq->data_ring.desc_size,
1741 rq->data_ring.base, rq->data_ring.basePA);
1742 rq->data_ring.base = NULL;
1743 }
1744
d1a890fa 1745 if (rq->comp_ring.base) {
b0eb57cb
AK
1746 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
1747 * sizeof(struct Vmxnet3_RxCompDesc),
1748 rq->comp_ring.base, rq->comp_ring.basePA);
d1a890fa
SB
1749 rq->comp_ring.base = NULL;
1750 }
b0eb57cb 1751
de1da8bc
RD
1752 kfree(rq->buf_info[0]);
1753 rq->buf_info[0] = NULL;
1754 rq->buf_info[1] = NULL;
d1a890fa
SB
1755}
1756
bb40aca7 1757static void
50a5ce3e
SK
1758vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
1759{
1760 int i;
1761
1762 for (i = 0; i < adapter->num_rx_queues; i++) {
1763 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1764
1765 if (rq->data_ring.base) {
1766 dma_free_coherent(&adapter->pdev->dev,
1767 (rq->rx_ring[0].size *
1768 rq->data_ring.desc_size),
1769 rq->data_ring.base,
1770 rq->data_ring.basePA);
1771 rq->data_ring.base = NULL;
1772 rq->data_ring.desc_size = 0;
1773 }
1774 }
1775}
1776
1777static int
1778vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1779 struct vmxnet3_adapter *adapter)
1780{
1781 int i;
1782
1783 /* initialize buf_info */
1784 for (i = 0; i < rq->rx_ring[0].size; i++) {
1785
1786 /* 1st buf for a pkt is skbuff */
1787 if (i % adapter->rx_buf_per_pkt == 0) {
1788 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1789 rq->buf_info[0][i].len = adapter->skb_buf_size;
1790 } else { /* subsequent bufs for a pkt is frag */
1791 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1792 rq->buf_info[0][i].len = PAGE_SIZE;
1793 }
1794 }
1795 for (i = 0; i < rq->rx_ring[1].size; i++) {
1796 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1797 rq->buf_info[1][i].len = PAGE_SIZE;
1798 }
1799
1800 /* reset internal state and allocate buffers for both rings */
1801 for (i = 0; i < 2; i++) {
1802 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
d1a890fa
SB
1803
1804 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1805 sizeof(struct Vmxnet3_RxDesc));
1806 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1807 }
1808 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1809 adapter) == 0) {
1810 /* the 1st ring must get at least 1 rx buffer */
1811 return -ENOMEM;
1812 }
1813 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1814
1815 /* reset the comp ring */
1816 rq->comp_ring.next2proc = 0;
1817 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1818 sizeof(struct Vmxnet3_RxCompDesc));
1819 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1820
1821 /* reset rxctx */
1822 rq->rx_ctx.skb = NULL;
1823
1824 /* stats are not reset */
1825 return 0;
1826}
1827
1828
09c5088e
SB
1829static int
1830vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1831{
1832 int i, err = 0;
1833
1834 for (i = 0; i < adapter->num_rx_queues; i++) {
1835 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1836 if (unlikely(err)) {
1837 dev_err(&adapter->netdev->dev, "%s: failed to "
1838 "initialize rx queue%i\n",
1839 adapter->netdev->name, i);
1840 break;
1841 }
1842 }
1843 return err;
1844
1845}
1846
1847
d1a890fa
SB
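/*
 * Allocate the DMA-coherent memory backing an rx queue: both rx rings, the
 * optional rx data ring (version 3 and later), the completion ring and the
 * buf_info array shared by the two rings.  On failure, whatever was
 * allocated so far is released through vmxnet3_rq_destroy().
 */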
1848static int
1849vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1850{
1851 int i;
1852 size_t sz;
1853 struct vmxnet3_rx_buf_info *bi;
1854
1855 for (i = 0; i < 2; i++) {
1856
1857 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
b0eb57cb
AK
1858 rq->rx_ring[i].base = dma_alloc_coherent(
1859 &adapter->pdev->dev, sz,
1860 &rq->rx_ring[i].basePA,
1861 GFP_KERNEL);
d1a890fa 1862 if (!rq->rx_ring[i].base) {
204a6e65
SH
1863 netdev_err(adapter->netdev,
1864 "failed to allocate rx ring %d\n", i);
d1a890fa
SB
1865 goto err;
1866 }
1867 }
1868
50a5ce3e
SK
1869 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
1870 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
1871 rq->data_ring.base =
1872 dma_alloc_coherent(&adapter->pdev->dev, sz,
1873 &rq->data_ring.basePA,
1874 GFP_KERNEL);
1875 if (!rq->data_ring.base) {
1876 netdev_err(adapter->netdev,
1877 "rx data ring will be disabled\n");
1878 adapter->rxdataring_enabled = false;
1879 }
1880 } else {
1881 rq->data_ring.base = NULL;
1882 rq->data_ring.desc_size = 0;
1883 }
1884
d1a890fa 1885 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
b0eb57cb
AK
1886 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
1887 &rq->comp_ring.basePA,
1888 GFP_KERNEL);
d1a890fa 1889 if (!rq->comp_ring.base) {
204a6e65 1890 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
d1a890fa
SB
1891 goto err;
1892 }
1893
de1da8bc
RD
1894 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
1895 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
1896 dev_to_node(&adapter->pdev->dev));
e404decb 1897 if (!bi)
d1a890fa 1898 goto err;
e404decb 1899
d1a890fa
SB
1900 rq->buf_info[0] = bi;
1901 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1902
1903 return 0;
1904
1905err:
1906 vmxnet3_rq_destroy(rq, adapter);
1907 return -ENOMEM;
1908}
1909
1910
09c5088e
SB
1911static int
1912vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1913{
1914 int i, err = 0;
1915
50a5ce3e
SK
1916 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
1917
09c5088e
SB
1918 for (i = 0; i < adapter->num_rx_queues; i++) {
1919 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1920 if (unlikely(err)) {
1921 dev_err(&adapter->netdev->dev,
1922 "%s: failed to create rx queue%i\n",
1923 adapter->netdev->name, i);
1924 goto err_out;
1925 }
1926 }
50a5ce3e
SK
1927
1928 if (!adapter->rxdataring_enabled)
1929 vmxnet3_rq_destroy_all_rxdataring(adapter);
1930
09c5088e
SB
1931 return err;
1932err_out:
1933 vmxnet3_rq_destroy_all(adapter);
1934 return err;
1935
1936}
1937
1938/* Multiple queue aware polling function for tx and rx */
1939
d1a890fa
SB
1940static int
1941vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1942{
09c5088e 1943 int rcd_done = 0, i;
d1a890fa
SB
1944 if (unlikely(adapter->shared->ecr))
1945 vmxnet3_process_events(adapter);
09c5088e
SB
1946 for (i = 0; i < adapter->num_tx_queues; i++)
1947 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
d1a890fa 1948
09c5088e
SB
1949 for (i = 0; i < adapter->num_rx_queues; i++)
1950 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1951 adapter, budget);
1952 return rcd_done;
d1a890fa
SB
1953}
1954
1955
1956static int
1957vmxnet3_poll(struct napi_struct *napi, int budget)
1958{
09c5088e
SB
1959 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1960 struct vmxnet3_rx_queue, napi);
1961 int rxd_done;
1962
1963 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1964
1965 if (rxd_done < budget) {
6ad20165 1966 napi_complete_done(napi, rxd_done);
09c5088e
SB
1967 vmxnet3_enable_all_intrs(rx_queue->adapter);
1968 }
1969 return rxd_done;
1970}
1971
1972/*
1973 * NAPI polling function for MSI-X mode with multiple Rx queues
1974 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
1975 */
1976
1977static int
1978vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1979{
1980 struct vmxnet3_rx_queue *rq = container_of(napi,
1981 struct vmxnet3_rx_queue, napi);
1982 struct vmxnet3_adapter *adapter = rq->adapter;
d1a890fa
SB
1983 int rxd_done;
1984
09c5088e
SB
1985 /* When sharing interrupt with corresponding tx queue, process
1986 * tx completions in that queue as well
1987 */
1988 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1989 struct vmxnet3_tx_queue *tq =
1990 &adapter->tx_queue[rq - adapter->rx_queue];
1991 vmxnet3_tq_tx_complete(tq, adapter);
1992 }
1993
1994 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
d1a890fa
SB
1995
1996 if (rxd_done < budget) {
6ad20165 1997 napi_complete_done(napi, rxd_done);
09c5088e 1998 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
d1a890fa
SB
1999 }
2000 return rxd_done;
2001}
2002
2003
09c5088e
SB
2004#ifdef CONFIG_PCI_MSI
2005
2006/*
2007 * Handle completion interrupts on tx queues
2008 * Returns whether or not the intr is handled
2009 */
2010
2011static irqreturn_t
2012vmxnet3_msix_tx(int irq, void *data)
2013{
2014 struct vmxnet3_tx_queue *tq = data;
2015 struct vmxnet3_adapter *adapter = tq->adapter;
2016
2017 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2018 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2019
2020 /* Handle the case where only one irq is allocated for all tx queues */
2021 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2022 int i;
2023 for (i = 0; i < adapter->num_tx_queues; i++) {
2024 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2025 vmxnet3_tq_tx_complete(txq, adapter);
2026 }
2027 } else {
2028 vmxnet3_tq_tx_complete(tq, adapter);
2029 }
2030 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2031
2032 return IRQ_HANDLED;
2033}
2034
2035
2036/*
2037 * Handle completion interrupts on rx queues. Returns whether or not the
2038 * intr is handled
2039 */
2040
2041static irqreturn_t
2042vmxnet3_msix_rx(int irq, void *data)
2043{
2044 struct vmxnet3_rx_queue *rq = data;
2045 struct vmxnet3_adapter *adapter = rq->adapter;
2046
2047 /* disable intr if needed */
2048 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2049 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2050 napi_schedule(&rq->napi);
2051
2052 return IRQ_HANDLED;
2053}
2054
2055/*
2056 *----------------------------------------------------------------------------
2057 *
2058 * vmxnet3_msix_event --
2059 *
2060 * vmxnet3 msix event intr handler
2061 *
2062 * Result:
2063 * whether or not the intr is handled
2064 *
2065 *----------------------------------------------------------------------------
2066 */
2067
2068static irqreturn_t
2069vmxnet3_msix_event(int irq, void *data)
2070{
2071 struct net_device *dev = data;
2072 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2073
2074 /* disable intr if needed */
2075 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2076 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2077
2078 if (adapter->shared->ecr)
2079 vmxnet3_process_events(adapter);
2080
2081 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2082
2083 return IRQ_HANDLED;
2084}
2085
2086#endif /* CONFIG_PCI_MSI */
2087
2088
d1a890fa
SB
2089/* Interrupt handler for vmxnet3 */
2090static irqreturn_t
2091vmxnet3_intr(int irq, void *dev_id)
2092{
2093 struct net_device *dev = dev_id;
2094 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2095
09c5088e 2096 if (adapter->intr.type == VMXNET3_IT_INTX) {
d1a890fa
SB
2097 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2098 if (unlikely(icr == 0))
2099 /* not ours */
2100 return IRQ_NONE;
2101 }
2102
2103
2104 /* disable intr if needed */
2105 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
09c5088e 2106 vmxnet3_disable_all_intrs(adapter);
d1a890fa 2107
09c5088e 2108 napi_schedule(&adapter->rx_queue[0].napi);
d1a890fa
SB
2109
2110 return IRQ_HANDLED;
2111}
2112
2113#ifdef CONFIG_NET_POLL_CONTROLLER
2114
d1a890fa
SB
2115/* netpoll callback. */
2116static void
2117vmxnet3_netpoll(struct net_device *netdev)
2118{
2119 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa 2120
d25f06ea 2121 switch (adapter->intr.type) {
0a8d8c44
AB
2122#ifdef CONFIG_PCI_MSI
2123 case VMXNET3_IT_MSIX: {
2124 int i;
d25f06ea
NH
2125 for (i = 0; i < adapter->num_rx_queues; i++)
2126 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2127 break;
0a8d8c44
AB
2128 }
2129#endif
d25f06ea
NH
2130 case VMXNET3_IT_MSI:
2131 default:
2132 vmxnet3_intr(0, adapter->netdev);
2133 break;
2134 }
d1a890fa 2135
d1a890fa 2136}
09c5088e 2137#endif /* CONFIG_NET_POLL_CONTROLLER */
d1a890fa
SB
2138
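/*
 * Request the device's interrupt lines.  With MSI-X this is one vector per
 * tx queue and one per rx queue (unless tx queues share a vector, or tx and
 * rx buddy-share one) plus a vector for events; otherwise a single MSI or
 * shared INTx line is used and the number of rx queues is forced to 1.
 * Each queue's completion ring records the vector index it ends up using.
 */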
2139static int
2140vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2141{
09c5088e
SB
2142 struct vmxnet3_intr *intr = &adapter->intr;
2143 int err = 0, i;
2144 int vector = 0;
d1a890fa 2145
8f7e524c 2146#ifdef CONFIG_PCI_MSI
d1a890fa 2147 if (adapter->intr.type == VMXNET3_IT_MSIX) {
09c5088e
SB
2148 for (i = 0; i < adapter->num_tx_queues; i++) {
2149 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2150 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2151 adapter->netdev->name, vector);
2152 err = request_irq(
2153 intr->msix_entries[vector].vector,
2154 vmxnet3_msix_tx, 0,
2155 adapter->tx_queue[i].name,
2156 &adapter->tx_queue[i]);
2157 } else {
2158 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2159 adapter->netdev->name, vector);
2160 }
2161 if (err) {
2162 dev_err(&adapter->netdev->dev,
2163 "Failed to request irq for MSIX, %s, "
2164 "error %d\n",
2165 adapter->tx_queue[i].name, err);
2166 return err;
2167 }
2168
2169 /* Handle the case where only 1 MSIx was allocated for
2170 * all tx queues */
2171 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2172 for (; i < adapter->num_tx_queues; i++)
2173 adapter->tx_queue[i].comp_ring.intr_idx
2174 = vector;
2175 vector++;
2176 break;
2177 } else {
2178 adapter->tx_queue[i].comp_ring.intr_idx
2179 = vector++;
2180 }
2181 }
2182 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2183 vector = 0;
2184
2185 for (i = 0; i < adapter->num_rx_queues; i++) {
2186 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2187 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2188 adapter->netdev->name, vector);
2189 else
2190 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2191 adapter->netdev->name, vector);
2192 err = request_irq(intr->msix_entries[vector].vector,
2193 vmxnet3_msix_rx, 0,
2194 adapter->rx_queue[i].name,
2195 &(adapter->rx_queue[i]));
2196 if (err) {
204a6e65
SH
2197 netdev_err(adapter->netdev,
2198 "Failed to request irq for MSIX, "
2199 "%s, error %d\n",
2200 adapter->rx_queue[i].name, err);
09c5088e
SB
2201 return err;
2202 }
2203
2204 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2205 }
2206
2207 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2208 adapter->netdev->name, vector);
2209 err = request_irq(intr->msix_entries[vector].vector,
2210 vmxnet3_msix_event, 0,
2211 intr->event_msi_vector_name, adapter->netdev);
2212 intr->event_intr_idx = vector;
2213
2214 } else if (intr->type == VMXNET3_IT_MSI) {
2215 adapter->num_rx_queues = 1;
d1a890fa
SB
2216 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2217 adapter->netdev->name, adapter->netdev);
09c5088e 2218 } else {
115924b6 2219#endif
09c5088e 2220 adapter->num_rx_queues = 1;
d1a890fa
SB
2221 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2222 IRQF_SHARED, adapter->netdev->name,
2223 adapter->netdev);
09c5088e 2224#ifdef CONFIG_PCI_MSI
d1a890fa 2225 }
09c5088e
SB
2226#endif
2227 intr->num_intrs = vector + 1;
2228 if (err) {
204a6e65
SH
2229 netdev_err(adapter->netdev,
2230 "Failed to request irq (intr type:%d), error %d\n",
2231 intr->type, err);
09c5088e
SB
2232 } else {
2233 /* Number of rx queues will not change after this */
2234 for (i = 0; i < adapter->num_rx_queues; i++) {
2235 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2236 rq->qid = i;
2237 rq->qid2 = i + adapter->num_rx_queues;
50a5ce3e 2238 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
09c5088e 2239 }
d1a890fa 2240
09c5088e
SB
2241 /* init our intr settings */
2242 for (i = 0; i < intr->num_intrs; i++)
2243 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2244 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2245 adapter->intr.event_intr_idx = 0;
2246 for (i = 0; i < adapter->num_tx_queues; i++)
2247 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2248 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2249 }
d1a890fa 2250
204a6e65
SH
2251 netdev_info(adapter->netdev,
2252 "intr type %u, mode %u, %u vectors allocated\n",
2253 intr->type, intr->mask_mode, intr->num_intrs);
d1a890fa
SB
2254 }
2255
2256 return err;
2257}
2258
2259
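/* Release the irqs acquired in vmxnet3_request_irqs(), in the same layout. */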
2260static void
2261vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2262{
09c5088e
SB
2263 struct vmxnet3_intr *intr = &adapter->intr;
2264 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
d1a890fa 2265
09c5088e 2266 switch (intr->type) {
8f7e524c 2267#ifdef CONFIG_PCI_MSI
d1a890fa
SB
2268 case VMXNET3_IT_MSIX:
2269 {
09c5088e 2270 int i, vector = 0;
d1a890fa 2271
09c5088e
SB
2272 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2273 for (i = 0; i < adapter->num_tx_queues; i++) {
2274 free_irq(intr->msix_entries[vector++].vector,
2275 &(adapter->tx_queue[i]));
2276 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2277 break;
2278 }
2279 }
2280
2281 for (i = 0; i < adapter->num_rx_queues; i++) {
2282 free_irq(intr->msix_entries[vector++].vector,
2283 &(adapter->rx_queue[i]));
2284 }
2285
2286 free_irq(intr->msix_entries[vector].vector,
2287 adapter->netdev);
2288 BUG_ON(vector >= intr->num_intrs);
d1a890fa
SB
2289 break;
2290 }
8f7e524c 2291#endif
d1a890fa
SB
2292 case VMXNET3_IT_MSI:
2293 free_irq(adapter->pdev->irq, adapter->netdev);
2294 break;
2295 case VMXNET3_IT_INTX:
2296 free_irq(adapter->pdev->irq, adapter->netdev);
2297 break;
2298 default:
c068e777 2299 BUG();
d1a890fa
SB
2300 }
2301}
2302
d1a890fa
SB
2303
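/*
 * Rewrite the device VLAN filter table from the cached active_vlans bitmap,
 * always allowing untagged packets (vid 0).
 */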
2304static void
2305vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2306{
72e85c45
JG
2307 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2308 u16 vid;
d1a890fa 2309
72e85c45
JG
2310 /* allow untagged pkts */
2311 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2312
2313 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2314 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
d1a890fa
SB
2315}
2316
2317
8e586137 2318static int
80d5c368 2319vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
d1a890fa
SB
2320{
2321 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa 2322
f6957f88
JG
2323 if (!(netdev->flags & IFF_PROMISC)) {
2324 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2325 unsigned long flags;
2326
2327 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2328 spin_lock_irqsave(&adapter->cmd_lock, flags);
2329 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2330 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2331 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2332 }
72e85c45
JG
2333
2334 set_bit(vid, adapter->active_vlans);
8e586137
JP
2335
2336 return 0;
d1a890fa
SB
2337}
2338
2339
8e586137 2340static int
80d5c368 2341vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
d1a890fa
SB
2342{
2343 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa 2344
f6957f88
JG
2345 if (!(netdev->flags & IFF_PROMISC)) {
2346 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2347 unsigned long flags;
2348
2349 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2350 spin_lock_irqsave(&adapter->cmd_lock, flags);
2351 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2352 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2353 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2354 }
72e85c45
JG
2355
2356 clear_bit(vid, adapter->active_vlans);
8e586137
JP
2357
2358 return 0;
d1a890fa
SB
2359}
2360
2361
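/*
 * Flatten the netdev multicast list into an array of MAC addresses for the
 * device filter table.  Returns NULL if the allocation fails or the table
 * would not fit in the 16-bit mfTableLen field.
 */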
2362static u8 *
2363vmxnet3_copy_mc(struct net_device *netdev)
2364{
2365 u8 *buf = NULL;
4cd24eaf 2366 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
d1a890fa
SB
2367
2368 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2369 if (sz <= 0xffff) {
2370 /* We may be called with BH disabled */
2371 buf = kmalloc(sz, GFP_ATOMIC);
2372 if (buf) {
22bedad3 2373 struct netdev_hw_addr *ha;
567ec874 2374 int i = 0;
d1a890fa 2375
22bedad3
JP
2376 netdev_for_each_mc_addr(ha, netdev)
2377 memcpy(buf + i++ * ETH_ALEN, ha->addr,
d1a890fa 2378 ETH_ALEN);
d1a890fa
SB
2379 }
2380 }
2381 return buf;
2382}
2383
2384
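/*
 * Recompute the rx mode (unicast/broadcast/multicast/all-multi/promiscuous)
 * from the netdev flags, DMA-map a fresh multicast table when one is
 * needed, and push the updated filters to the device.  If the multicast
 * table cannot be copied or mapped, fall back to ALL_MULTI.
 */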
2385static void
2386vmxnet3_set_mc(struct net_device *netdev)
2387{
2388 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
83d0feff 2389 unsigned long flags;
d1a890fa
SB
2390 struct Vmxnet3_RxFilterConf *rxConf =
2391 &adapter->shared->devRead.rxFilterConf;
2392 u8 *new_table = NULL;
b0eb57cb 2393 dma_addr_t new_table_pa = 0;
fb5c6cfa 2394 bool new_table_pa_valid = false;
d1a890fa
SB
2395 u32 new_mode = VMXNET3_RXM_UCAST;
2396
72e85c45
JG
2397 if (netdev->flags & IFF_PROMISC) {
2398 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2399 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2400
d1a890fa 2401 new_mode |= VMXNET3_RXM_PROMISC;
72e85c45
JG
2402 } else {
2403 vmxnet3_restore_vlan(adapter);
2404 }
d1a890fa
SB
2405
2406 if (netdev->flags & IFF_BROADCAST)
2407 new_mode |= VMXNET3_RXM_BCAST;
2408
2409 if (netdev->flags & IFF_ALLMULTI)
2410 new_mode |= VMXNET3_RXM_ALL_MULTI;
2411 else
4cd24eaf 2412 if (!netdev_mc_empty(netdev)) {
d1a890fa
SB
2413 new_table = vmxnet3_copy_mc(netdev);
2414 if (new_table) {
d37d5ec8
SK
2415 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2416
2417 rxConf->mfTableLen = cpu_to_le16(sz);
b0eb57cb
AK
2418 new_table_pa = dma_map_single(
2419 &adapter->pdev->dev,
2420 new_table,
d37d5ec8 2421 sz,
bf7bec46 2422 DMA_TO_DEVICE);
fb5c6cfa
AK
2423 if (!dma_mapping_error(&adapter->pdev->dev,
2424 new_table_pa)) {
2425 new_mode |= VMXNET3_RXM_MCAST;
2426 new_table_pa_valid = true;
2427 rxConf->mfTablePA = cpu_to_le64(
2428 new_table_pa);
2429 }
4ad9a64f 2430 }
fb5c6cfa 2431 if (!new_table_pa_valid) {
4ad9a64f
AK
2432 netdev_info(netdev,
2433 "failed to copy mcast list, setting ALL_MULTI\n");
d1a890fa
SB
2434 new_mode |= VMXNET3_RXM_ALL_MULTI;
2435 }
2436 }
2437
d1a890fa
SB
2438 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2439 rxConf->mfTableLen = 0;
2440 rxConf->mfTablePA = 0;
2441 }
2442
83d0feff 2443 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa 2444 if (new_mode != rxConf->rxMode) {
115924b6 2445 rxConf->rxMode = cpu_to_le32(new_mode);
d1a890fa
SB
2446 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2447 VMXNET3_CMD_UPDATE_RX_MODE);
72e85c45
JG
2448 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2449 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
d1a890fa
SB
2450 }
2451
2452 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2453 VMXNET3_CMD_UPDATE_MAC_FILTERS);
83d0feff 2454 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa 2455
fb5c6cfa 2456 if (new_table_pa_valid)
b0eb57cb 2457 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
bf7bec46 2458 rxConf->mfTableLen, DMA_TO_DEVICE);
4ad9a64f 2459 kfree(new_table);
d1a890fa
SB
2460}
2461
09c5088e
SB
2462void
2463vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2464{
2465 int i;
2466
2467 for (i = 0; i < adapter->num_rx_queues; i++)
2468 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2469}
2470
d1a890fa
SB
2471
2472/*
2473 * Set up driver_shared based on settings in adapter.
2474 */
2475
2476static void
2477vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2478{
2479 struct Vmxnet3_DriverShared *shared = adapter->shared;
2480 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
39f9895a 2481 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
d1a890fa
SB
2482 struct Vmxnet3_TxQueueConf *tqc;
2483 struct Vmxnet3_RxQueueConf *rqc;
2484 int i;
2485
2486 memset(shared, 0, sizeof(*shared));
2487
2488 /* driver settings */
115924b6
SB
2489 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2490 devRead->misc.driverInfo.version = cpu_to_le32(
2491 VMXNET3_DRIVER_VERSION_NUM);
d1a890fa
SB
2492 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2493 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2494 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
115924b6
SB
2495 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2496 *((u32 *)&devRead->misc.driverInfo.gos));
2497 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2498 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
d1a890fa 2499
b0eb57cb 2500 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
115924b6 2501 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
d1a890fa
SB
2502
2503 /* set up feature flags */
a0d2730c 2504 if (adapter->netdev->features & NETIF_F_RXCSUM)
3843e515 2505 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
d1a890fa 2506
a0d2730c 2507 if (adapter->netdev->features & NETIF_F_LRO) {
3843e515 2508 devRead->misc.uptFeatures |= UPT1_F_LRO;
115924b6 2509 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
d1a890fa 2510 }
f646968f 2511 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3843e515 2512 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
d1a890fa 2513
dacce2be
RD
2514 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2515 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2516 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2517
115924b6
SB
2518 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2519 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2520 devRead->misc.queueDescLen = cpu_to_le32(
09c5088e
SB
2521 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2522 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
d1a890fa
SB
2523
2524 /* tx queue settings */
09c5088e
SB
2525 devRead->misc.numTxQueues = adapter->num_tx_queues;
2526 for (i = 0; i < adapter->num_tx_queues; i++) {
2527 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2528 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2529 tqc = &adapter->tqd_start[i].conf;
2530 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2531 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2532 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
de1da8bc 2533 tqc->ddPA = cpu_to_le64(~0ULL);
09c5088e
SB
2534 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2535 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
3c8b3efc 2536 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
09c5088e 2537 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
de1da8bc 2538 tqc->ddLen = cpu_to_le32(0);
09c5088e
SB
2539 tqc->intrIdx = tq->comp_ring.intr_idx;
2540 }
d1a890fa
SB
2541
2542 /* rx queue settings */
09c5088e
SB
2543 devRead->misc.numRxQueues = adapter->num_rx_queues;
2544 for (i = 0; i < adapter->num_rx_queues; i++) {
2545 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2546 rqc = &adapter->rqd_start[i].conf;
2547 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2548 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2549 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
de1da8bc 2550 rqc->ddPA = cpu_to_le64(~0ULL);
09c5088e
SB
2551 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2552 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2553 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
de1da8bc 2554 rqc->ddLen = cpu_to_le32(0);
09c5088e 2555 rqc->intrIdx = rq->comp_ring.intr_idx;
50a5ce3e
SK
2556 if (VMXNET3_VERSION_GE_3(adapter)) {
2557 rqc->rxDataRingBasePA =
2558 cpu_to_le64(rq->data_ring.basePA);
2559 rqc->rxDataRingDescSize =
2560 cpu_to_le16(rq->data_ring.desc_size);
2561 }
09c5088e
SB
2562 }
2563
2564#ifdef VMXNET3_RSS
2565 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2566
2567 if (adapter->rss) {
2568 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
66d35910 2569
09c5088e
SB
2570 devRead->misc.uptFeatures |= UPT1_F_RSS;
2571 devRead->misc.numRxQueues = adapter->num_rx_queues;
2572 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2573 UPT1_RSS_HASH_TYPE_IPV4 |
2574 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2575 UPT1_RSS_HASH_TYPE_IPV6;
2576 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2577 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2578 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
6bf79cdd 2579 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
66d35910 2580
09c5088e 2581 for (i = 0; i < rssConf->indTableSize; i++)
278bc429
BH
2582 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2583 i, adapter->num_rx_queues);
09c5088e
SB
2584
2585 devRead->rssConfDesc.confVer = 1;
b0eb57cb
AK
2586 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2587 devRead->rssConfDesc.confPA =
2588 cpu_to_le64(adapter->rss_conf_pa);
09c5088e
SB
2589 }
2590
2591#endif /* VMXNET3_RSS */
d1a890fa
SB
2592
2593 /* intr settings */
39f9895a
RD
2594 if (!VMXNET3_VERSION_GE_6(adapter) ||
2595 !adapter->queuesExtEnabled) {
2596 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2597 VMXNET3_IMM_AUTO;
2598 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2599 for (i = 0; i < adapter->intr.num_intrs; i++)
2600 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2601
2602 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2603 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2604 } else {
2605 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2606 VMXNET3_IMM_AUTO;
2607 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2608 for (i = 0; i < adapter->intr.num_intrs; i++)
2609 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
d1a890fa 2610
39f9895a
RD
2611 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2612 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2613 }
d1a890fa
SB
2614
2615 /* rx filter settings */
2616 devRead->rxFilterConf.rxMode = 0;
2617 vmxnet3_restore_vlan(adapter);
f9f25026
SB
2618 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2619
d1a890fa
SB
2620 /* the rest are already zeroed */
2621}
2622
4edef40e
SK
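/*
 * On version 3 and later devices, either read back the default interrupt
 * coalescing scheme or apply the currently configured one through the
 * shared command area.
 */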
2623static void
2624vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2625{
2626 struct Vmxnet3_DriverShared *shared = adapter->shared;
2627 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2628 unsigned long flags;
2629
2630 if (!VMXNET3_VERSION_GE_3(adapter))
2631 return;
2632
2633 spin_lock_irqsave(&adapter->cmd_lock, flags);
2634 cmdInfo->varConf.confVer = 1;
2635 cmdInfo->varConf.confLen =
2636 cpu_to_le32(sizeof(*adapter->coal_conf));
2637 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2638
2639 if (adapter->default_coal_mode) {
2640 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2641 VMXNET3_CMD_GET_COALESCE);
2642 } else {
2643 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2644 VMXNET3_CMD_SET_COALESCE);
2645 }
2646
2647 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2648}
d1a890fa 2649
d3a8a9e5
RD
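/*
 * On version 4 and later devices, query the default RSS hash fields or
 * program the requested ones.  The device may not honor every requested
 * field, so the value actually applied is read back and cached.
 */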
2650static void
2651vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2652{
2653 struct Vmxnet3_DriverShared *shared = adapter->shared;
2654 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2655 unsigned long flags;
2656
dacce2be
RD
2657 if (!VMXNET3_VERSION_GE_4(adapter))
2658 return;
d3a8a9e5
RD
2659
2660 spin_lock_irqsave(&adapter->cmd_lock, flags);
2661
2662 if (adapter->default_rss_fields) {
2663 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2664 VMXNET3_CMD_GET_RSS_FIELDS);
2665 adapter->rss_fields =
2666 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2667 } else {
2668 cmdInfo->setRssFields = adapter->rss_fields;
2669 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2670 VMXNET3_CMD_SET_RSS_FIELDS);
2671 /* Not all requested RSS may get applied, so get and
2672 * cache what was actually applied.
2673 */
2674 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2675 VMXNET3_CMD_GET_RSS_FIELDS);
2676 adapter->rss_fields =
2677 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2678 }
2679
2680 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2681}
2682
d1a890fa
SB
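/*
 * Bring the device up: initialize the tx/rx queues, request irqs, fill in
 * the driver-shared area and issue ACTIVATE_DEV, then prime the rx producer
 * registers, apply the rx filter settings, check the link state, and enable
 * napi and interrupts.
 */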
2683int
2684vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2685{
09c5088e 2686 int err, i;
d1a890fa 2687 u32 ret;
83d0feff 2688 unsigned long flags;
d1a890fa 2689
fdcd79b9 2690 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
09c5088e
SB
2691 " ring sizes %u %u %u\n", adapter->netdev->name,
2692 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2693 adapter->tx_queue[0].tx_ring.size,
2694 adapter->rx_queue[0].rx_ring[0].size,
2695 adapter->rx_queue[0].rx_ring[1].size);
2696
2697 vmxnet3_tq_init_all(adapter);
2698 err = vmxnet3_rq_init_all(adapter);
d1a890fa 2699 if (err) {
204a6e65
SH
2700 netdev_err(adapter->netdev,
2701 "Failed to init rx queue error %d\n", err);
d1a890fa
SB
2702 goto rq_err;
2703 }
2704
2705 err = vmxnet3_request_irqs(adapter);
2706 if (err) {
204a6e65
SH
2707 netdev_err(adapter->netdev,
2708 "Failed to setup irq for error %d\n", err);
d1a890fa
SB
2709 goto irq_err;
2710 }
2711
2712 vmxnet3_setup_driver_shared(adapter);
2713
115924b6
SB
2714 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2715 adapter->shared_pa));
2716 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2717 adapter->shared_pa));
83d0feff 2718 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa
SB
2719 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2720 VMXNET3_CMD_ACTIVATE_DEV);
2721 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
83d0feff 2722 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
2723
2724 if (ret != 0) {
204a6e65
SH
2725 netdev_err(adapter->netdev,
2726 "Failed to activate dev: error %u\n", ret);
d1a890fa
SB
2727 err = -EINVAL;
2728 goto activate_err;
2729 }
09c5088e 2730
4edef40e 2731 vmxnet3_init_coalesce(adapter);
d3a8a9e5 2732 vmxnet3_init_rssfields(adapter);
4edef40e 2733
09c5088e
SB
2734 for (i = 0; i < adapter->num_rx_queues; i++) {
2735 VMXNET3_WRITE_BAR0_REG(adapter,
2736 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2737 adapter->rx_queue[i].rx_ring[0].next2fill);
2738 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2739 (i * VMXNET3_REG_ALIGN)),
2740 adapter->rx_queue[i].rx_ring[1].next2fill);
2741 }
d1a890fa
SB
2742
2743 /* Apply the rx filter settings last. */
2744 vmxnet3_set_mc(adapter->netdev);
2745
2746 /*
2747 * Check link state when first activating device. It will start the
2748 * tx queue if the link is up.
2749 */
4a1745fc 2750 vmxnet3_check_link(adapter, true);
39f9895a 2751 netif_tx_wake_all_queues(adapter->netdev);
09c5088e
SB
2752 for (i = 0; i < adapter->num_rx_queues; i++)
2753 napi_enable(&adapter->rx_queue[i].napi);
d1a890fa
SB
2754 vmxnet3_enable_all_intrs(adapter);
2755 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2756 return 0;
2757
2758activate_err:
2759 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2760 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2761 vmxnet3_free_irqs(adapter);
2762irq_err:
2763rq_err:
2764 /* free up buffers we allocated */
09c5088e 2765 vmxnet3_rq_cleanup_all(adapter);
d1a890fa
SB
2766 return err;
2767}
2768
2769
2770void
2771vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2772{
83d0feff
SB
2773 unsigned long flags;
2774 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa 2775 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
83d0feff 2776 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
2777}
2778
2779
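/*
 * Quiesce the device unless it is already quiesced: issue QUIESCE_DEV,
 * disable interrupts and napi, stop the tx queues, and release the queue
 * buffers and irqs.
 */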
2780int
2781vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2782{
09c5088e 2783 int i;
83d0feff 2784 unsigned long flags;
d1a890fa
SB
2785 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2786 return 0;
2787
2788
83d0feff 2789 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa
SB
2790 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2791 VMXNET3_CMD_QUIESCE_DEV);
83d0feff 2792 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
2793 vmxnet3_disable_all_intrs(adapter);
2794
09c5088e
SB
2795 for (i = 0; i < adapter->num_rx_queues; i++)
2796 napi_disable(&adapter->rx_queue[i].napi);
d1a890fa
SB
2797 netif_tx_disable(adapter->netdev);
2798 adapter->link_speed = 0;
2799 netif_carrier_off(adapter->netdev);
2800
09c5088e
SB
2801 vmxnet3_tq_cleanup_all(adapter);
2802 vmxnet3_rq_cleanup_all(adapter);
d1a890fa
SB
2803 vmxnet3_free_irqs(adapter);
2804 return 0;
2805}
2806
2807
2808static void
2809vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2810{
2811 u32 tmp;
2812
2813 tmp = *(u32 *)mac;
2814 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2815
2816 tmp = (mac[5] << 8) | mac[4];
2817 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2818}
2819
2820
2821static int
2822vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2823{
2824 struct sockaddr *addr = p;
2825 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2826
2827 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2828 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2829
2830 return 0;
2831}
2832
2833
2834/* ==================== initialization and cleanup routines ============ */
2835
2836static int
61aeecea 2837vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
d1a890fa
SB
2838{
2839 int err;
2840 unsigned long mmio_start, mmio_len;
2841 struct pci_dev *pdev = adapter->pdev;
2842
2843 err = pci_enable_device(pdev);
2844 if (err) {
204a6e65 2845 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
d1a890fa
SB
2846 return err;
2847 }
2848
d1a890fa
SB
2849 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2850 vmxnet3_driver_name);
2851 if (err) {
204a6e65
SH
2852 dev_err(&pdev->dev,
2853 "Failed to request region for adapter: error %d\n", err);
61aeecea 2854 goto err_enable_device;
d1a890fa
SB
2855 }
2856
2857 pci_set_master(pdev);
2858
2859 mmio_start = pci_resource_start(pdev, 0);
2860 mmio_len = pci_resource_len(pdev, 0);
2861 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2862 if (!adapter->hw_addr0) {
204a6e65 2863 dev_err(&pdev->dev, "Failed to map bar0\n");
d1a890fa
SB
2864 err = -EIO;
2865 goto err_ioremap;
2866 }
2867
2868 mmio_start = pci_resource_start(pdev, 1);
2869 mmio_len = pci_resource_len(pdev, 1);
2870 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2871 if (!adapter->hw_addr1) {
204a6e65 2872 dev_err(&pdev->dev, "Failed to map bar1\n");
d1a890fa
SB
2873 err = -EIO;
2874 goto err_bar1;
2875 }
2876 return 0;
2877
2878err_bar1:
2879 iounmap(adapter->hw_addr0);
2880err_ioremap:
2881 pci_release_selected_regions(pdev, (1 << 2) - 1);
61aeecea 2882err_enable_device:
d1a890fa
SB
2883 pci_disable_device(pdev);
2884 return err;
2885}
2886
2887
2888static void
2889vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2890{
2891 BUG_ON(!adapter->pdev);
2892
2893 iounmap(adapter->hw_addr0);
2894 iounmap(adapter->hw_addr1);
2895 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2896 pci_disable_device(adapter->pdev);
2897}
2898
2899
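/*
 * Derive skb_buf_size and rx_buf_per_pkt from the current MTU: a packet
 * that fits in VMXNET3_MAX_SKB_BUF_SIZE uses a single skb buffer, larger
 * MTUs spill the remainder into page-sized buffers, i.e.
 * rx_buf_per_pkt = 1 + DIV_ROUND_UP(mtu - VMXNET3_MAX_SKB_BUF_SIZE +
 * VMXNET3_MAX_ETH_HDR_SIZE, PAGE_SIZE).  Ring0 is then rounded up to a
 * multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN and capped at the
 * ring maximum; for example, assuming rx_buf_per_pkt == 3 and an alignment
 * of 32, a requested ring0 size of 1000 becomes 1056 (a multiple of 96).
 */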
2900static void
2901vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2902{
09c5088e 2903 size_t sz, i, ring0_size, ring1_size, comp_size;
d1a890fa
SB
2904 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2905 VMXNET3_MAX_ETH_HDR_SIZE) {
2906 adapter->skb_buf_size = adapter->netdev->mtu +
2907 VMXNET3_MAX_ETH_HDR_SIZE;
2908 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2909 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2910
2911 adapter->rx_buf_per_pkt = 1;
2912 } else {
2913 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2914 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2915 VMXNET3_MAX_ETH_HDR_SIZE;
2916 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2917 }
2918
2919 /*
2920 * for simplicity, force the ring0 size to be a multiple of
2921 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2922 */
2923 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
09c5088e
SB
2924 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2925 ring0_size = (ring0_size + sz - 1) / sz * sz;
a53255d3 2926 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
09c5088e
SB
2927 sz * sz);
2928 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
53831aa1
SK
2929 ring1_size = (ring1_size + sz - 1) / sz * sz;
2930 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2931 sz * sz);
09c5088e
SB
2932 comp_size = ring0_size + ring1_size;
2933
2934 for (i = 0; i < adapter->num_rx_queues; i++) {
5e264e2b
CIK
2935 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2936
09c5088e
SB
2937 rq->rx_ring[0].size = ring0_size;
2938 rq->rx_ring[1].size = ring1_size;
2939 rq->comp_ring.size = comp_size;
2940 }
d1a890fa
SB
2941}
2942
2943
2944int
2945vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3c8b3efc 2946 u32 rx_ring_size, u32 rx_ring2_size,
50a5ce3e 2947 u16 txdata_desc_size, u16 rxdata_desc_size)
d1a890fa 2948{
09c5088e
SB
2949 int err = 0, i;
2950
2951 for (i = 0; i < adapter->num_tx_queues; i++) {
2952 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2953 tq->tx_ring.size = tx_ring_size;
2954 tq->data_ring.size = tx_ring_size;
2955 tq->comp_ring.size = tx_ring_size;
3c8b3efc 2956 tq->txdata_desc_size = txdata_desc_size;
09c5088e
SB
2957 tq->shared = &adapter->tqd_start[i].ctrl;
2958 tq->stopped = true;
2959 tq->adapter = adapter;
2960 tq->qid = i;
2961 err = vmxnet3_tq_create(tq, adapter);
2962 /*
2963 * Too late to change num_tx_queues. We cannot fall back to
2964 * fewer queues than what we asked for
2965 */
2966 if (err)
2967 goto queue_err;
2968 }
d1a890fa 2969
09c5088e
SB
2970 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2971 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
d1a890fa 2972 vmxnet3_adjust_rx_ring_size(adapter);
50a5ce3e
SK
2973
2974 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
09c5088e
SB
2975 for (i = 0; i < adapter->num_rx_queues; i++) {
2976 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2977 /* qid and qid2 for rx queues will be assigned later when num
2978 * of rx queues is finalized after allocating intrs */
2979 rq->shared = &adapter->rqd_start[i].ctrl;
2980 rq->adapter = adapter;
50a5ce3e 2981 rq->data_ring.desc_size = rxdata_desc_size;
09c5088e
SB
2982 err = vmxnet3_rq_create(rq, adapter);
2983 if (err) {
2984 if (i == 0) {
204a6e65
SH
2985 netdev_err(adapter->netdev,
2986 "Could not allocate any rx queues. "
2987 "Aborting.\n");
09c5088e
SB
2988 goto queue_err;
2989 } else {
204a6e65
SH
2990 netdev_info(adapter->netdev,
2991 "Number of rx queues changed "
2992 "to : %d.\n", i);
09c5088e
SB
2993 adapter->num_rx_queues = i;
2994 err = 0;
2995 break;
2996 }
2997 }
2998 }
50a5ce3e
SK
2999
3000 if (!adapter->rxdataring_enabled)
3001 vmxnet3_rq_destroy_all_rxdataring(adapter);
3002
09c5088e
SB
3003 return err;
3004queue_err:
3005 vmxnet3_tq_destroy_all(adapter);
d1a890fa
SB
3006 return err;
3007}
3008
3009static int
3010vmxnet3_open(struct net_device *netdev)
3011{
3012 struct vmxnet3_adapter *adapter;
09c5088e 3013 int err, i;
d1a890fa
SB
3014
3015 adapter = netdev_priv(netdev);
3016
09c5088e
SB
3017 for (i = 0; i < adapter->num_tx_queues; i++)
3018 spin_lock_init(&adapter->tx_queue[i].tx_lock);
d1a890fa 3019
3c8b3efc
SK
3020 if (VMXNET3_VERSION_GE_3(adapter)) {
3021 unsigned long flags;
3022 u16 txdata_desc_size;
3023
3024 spin_lock_irqsave(&adapter->cmd_lock, flags);
3025 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3026 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3027 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3028 VMXNET3_REG_CMD);
3029 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3030
3031 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3032 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3033 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3034 adapter->txdata_desc_size =
3035 sizeof(struct Vmxnet3_TxDataDesc);
3036 } else {
3037 adapter->txdata_desc_size = txdata_desc_size;
3038 }
3039 } else {
3040 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3041 }
3042
3043 err = vmxnet3_create_queues(adapter,
3044 adapter->tx_ring_size,
f00e2b0a 3045 adapter->rx_ring_size,
3c8b3efc 3046 adapter->rx_ring2_size,
50a5ce3e
SK
3047 adapter->txdata_desc_size,
3048 adapter->rxdata_desc_size);
d1a890fa
SB
3049 if (err)
3050 goto queue_err;
3051
3052 err = vmxnet3_activate_dev(adapter);
3053 if (err)
3054 goto activate_err;
3055
3056 return 0;
3057
3058activate_err:
09c5088e
SB
3059 vmxnet3_rq_destroy_all(adapter);
3060 vmxnet3_tq_destroy_all(adapter);
d1a890fa
SB
3061queue_err:
3062 return err;
3063}
3064
3065
3066static int
3067vmxnet3_close(struct net_device *netdev)
3068{
3069 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3070
3071 /*
3072 * Reset_work may be in the middle of resetting the device, wait for its
3073 * completion.
3074 */
3075 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
93c65d13 3076 usleep_range(1000, 2000);
d1a890fa
SB
3077
3078 vmxnet3_quiesce_dev(adapter);
3079
09c5088e
SB
3080 vmxnet3_rq_destroy_all(adapter);
3081 vmxnet3_tq_destroy_all(adapter);
d1a890fa
SB
3082
3083 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3084
3085
3086 return 0;
3087}
3088
3089
3090void
3091vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3092{
09c5088e
SB
3093 int i;
3094
d1a890fa
SB
3095 /*
3096 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3097 * vmxnet3_close() will deadlock.
3098 */
3099 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3100
3101 /* we need to enable NAPI, otherwise dev_close will deadlock */
09c5088e
SB
3102 for (i = 0; i < adapter->num_rx_queues; i++)
3103 napi_enable(&adapter->rx_queue[i].napi);
1c4d5f51
NH
3104 /*
3105 * Need to clear the quiesce bit to ensure that vmxnet3_close
3106 * can quiesce the device properly
3107 */
3108 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
d1a890fa
SB
3109 dev_close(adapter->netdev);
3110}
3111
3112
3113static int
3114vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3115{
3116 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3117 int err = 0;
3118
d1a890fa
SB
3119 netdev->mtu = new_mtu;
3120
3121 /*
3122 * Reset_work may be in the middle of resetting the device, wait for its
3123 * completion.
3124 */
3125 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
93c65d13 3126 usleep_range(1000, 2000);
d1a890fa
SB
3127
3128 if (netif_running(netdev)) {
3129 vmxnet3_quiesce_dev(adapter);
3130 vmxnet3_reset_dev(adapter);
3131
3132 /* we need to re-create the rx queue based on the new mtu */
09c5088e 3133 vmxnet3_rq_destroy_all(adapter);
d1a890fa 3134 vmxnet3_adjust_rx_ring_size(adapter);
09c5088e 3135 err = vmxnet3_rq_create_all(adapter);
d1a890fa 3136 if (err) {
204a6e65
SH
3137 netdev_err(netdev,
3138 "failed to re-create rx queues, "
3139 " error %d. Closing it.\n", err);
d1a890fa
SB
3140 goto out;
3141 }
3142
3143 err = vmxnet3_activate_dev(adapter);
3144 if (err) {
204a6e65
SH
3145 netdev_err(netdev,
3146 "failed to re-activate, error %d. "
3147 "Closing it\n", err);
d1a890fa
SB
3148 goto out;
3149 }
3150 }
3151
3152out:
3153 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3154 if (err)
3155 vmxnet3_force_close(adapter);
3156
3157 return err;
3158}
3159
3160
3161static void
3162vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3163{
3164 struct net_device *netdev = adapter->netdev;
3165
a0d2730c 3166 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
f646968f
PM
3167 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3168 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
72e85c45 3169 NETIF_F_LRO;
dacce2be
RD
3170
3171 if (VMXNET3_VERSION_GE_4(adapter)) {
3172 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3173 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3174
3175 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3176 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3177 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3178 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3179 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3180 }
3181
a0d2730c 3182 if (dma64)
ebbf9295 3183 netdev->hw_features |= NETIF_F_HIGHDMA;
72e85c45 3184 netdev->vlan_features = netdev->hw_features &
f646968f
PM
3185 ~(NETIF_F_HW_VLAN_CTAG_TX |
3186 NETIF_F_HW_VLAN_CTAG_RX);
3187 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
d1a890fa
SB
3188}
3189
3190
3191static void
3192vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3193{
3194 u32 tmp;
3195
3196 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3197 *(u32 *)mac = tmp;
3198
3199 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3200 mac[4] = tmp & 0xff;
3201 mac[5] = (tmp >> 8) & 0xff;
3202}
3203
09c5088e
SB
3204#ifdef CONFIG_PCI_MSI
3205
3206/*
3207 * Enable MSIx vectors.
3208 * Returns :
25985edc 3209 * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
b60b869d
AG
3210 * were enabled.
3211 * number of vectors which were enabled otherwise (this number is greater
09c5088e
SB
3212 * than VMXNET3_LINUX_MIN_MSIX_VECT)
3213 */
3214
3215static int
b60b869d 3216vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
09c5088e 3217{
c0a1be38
AG
3218 int ret = pci_enable_msix_range(adapter->pdev,
3219 adapter->intr.msix_entries, nvec, nvec);
09c5088e 3220
c0a1be38
AG
3221 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3222 dev_err(&adapter->netdev->dev,
3223 "Failed to enable %d MSI-X, trying %d\n",
3224 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3225
3226 ret = pci_enable_msix_range(adapter->pdev,
3227 adapter->intr.msix_entries,
3228 VMXNET3_LINUX_MIN_MSIX_VECT,
3229 VMXNET3_LINUX_MIN_MSIX_VECT);
3230 }
3231
3232 if (ret < 0) {
3233 dev_err(&adapter->netdev->dev,
3234 "Failed to enable MSI-X, error: %d\n", ret);
3235 }
3236
3237 return ret;
09c5088e
SB
3238}
3239
3240
3241#endif /* CONFIG_PCI_MSI */
d1a890fa
SB
3242
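/*
 * Ask the device for its preferred interrupt type and mask mode, then try
 * to allocate interrupts in order of preference: MSI-X (ideally one vector
 * per queue plus one for events, shrinking to a shared minimal set if
 * needed), then MSI, and finally legacy INTx with a single rx queue.
 */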
3243static void
3244vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3245{
3246 u32 cfg;
e328d410 3247 unsigned long flags;
d1a890fa
SB
3248
3249 /* intr settings */
e328d410 3250 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa
SB
3251 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3252 VMXNET3_CMD_GET_CONF_INTR);
3253 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
e328d410 3254 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
3255 adapter->intr.type = cfg & 0x3;
3256 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3257
3258 if (adapter->intr.type == VMXNET3_IT_AUTO) {
0bdc0d70
SB
3259 adapter->intr.type = VMXNET3_IT_MSIX;
3260 }
d1a890fa 3261
8f7e524c 3262#ifdef CONFIG_PCI_MSI
0bdc0d70 3263 if (adapter->intr.type == VMXNET3_IT_MSIX) {
b60b869d
AG
3264 int i, nvec;
3265
3266 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3267 1 : adapter->num_tx_queues;
3268 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3269 0 : adapter->num_rx_queues;
3270 nvec += 1; /* for link event */
3271 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3272 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3273
3274 for (i = 0; i < nvec; i++)
3275 adapter->intr.msix_entries[i].entry = i;
3276
3277 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3278 if (nvec < 0)
3279 goto msix_err;
3280
09c5088e
SB
3281 /* If we cannot allocate one MSIx vector per queue
3282 * then limit the number of rx queues to 1
3283 */
b60b869d 3284 if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
09c5088e 3285 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
7e96fbf2 3286 || adapter->num_rx_queues != 1) {
09c5088e 3287 adapter->share_intr = VMXNET3_INTR_TXSHARE;
204a6e65
SH
3288 netdev_err(adapter->netdev,
3289 "Number of rx queues : 1\n");
09c5088e 3290 adapter->num_rx_queues = 1;
09c5088e 3291 }
d1a890fa 3292 }
09c5088e 3293
b60b869d
AG
3294 adapter->intr.num_intrs = nvec;
3295 return;
3296
3297msix_err:
09c5088e 3298 /* If we cannot allocate MSIx vectors use only one rx queue */
4bad25fa
SH
3299 dev_info(&adapter->pdev->dev,
3300 "Failed to enable MSI-X, error %d. "
b60b869d 3301 "Limiting #rx queues to 1, try MSI.\n", nvec);
09c5088e 3302
0bdc0d70
SB
3303 adapter->intr.type = VMXNET3_IT_MSI;
3304 }
d1a890fa 3305
0bdc0d70 3306 if (adapter->intr.type == VMXNET3_IT_MSI) {
b60b869d 3307 if (!pci_enable_msi(adapter->pdev)) {
09c5088e 3308 adapter->num_rx_queues = 1;
d1a890fa 3309 adapter->intr.num_intrs = 1;
d1a890fa
SB
3310 return;
3311 }
3312 }
0bdc0d70 3313#endif /* CONFIG_PCI_MSI */
d1a890fa 3314
09c5088e 3315 adapter->num_rx_queues = 1;
204a6e65
SH
3316 dev_info(&adapter->netdev->dev,
3317 "Using INTx interrupt, #Rx queues: 1.\n");
d1a890fa
SB
3318 adapter->intr.type = VMXNET3_IT_INTX;
3319
3320 /* INT-X related setting */
3321 adapter->intr.num_intrs = 1;
3322}
3323
3324
3325static void
3326vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3327{
3328 if (adapter->intr.type == VMXNET3_IT_MSIX)
3329 pci_disable_msix(adapter->pdev);
3330 else if (adapter->intr.type == VMXNET3_IT_MSI)
3331 pci_disable_msi(adapter->pdev);
3332 else
3333 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3334}
3335
3336
3337static void
0290bd29 3338vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
d1a890fa
SB
3339{
3340 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3341 adapter->tx_timeout_count++;
3342
204a6e65 3343 netdev_err(adapter->netdev, "tx hang\n");
d1a890fa
SB
3344 schedule_work(&adapter->work);
3345}
3346
3347
3348static void
3349vmxnet3_reset_work(struct work_struct *data)
3350{
3351 struct vmxnet3_adapter *adapter;
3352
3353 adapter = container_of(data, struct vmxnet3_adapter, work);
3354
3355 /* if another thread is resetting the device, no need to proceed */
3356 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3357 return;
3358
3359 /* if the device is closed, we must leave it alone */
d9a5f210 3360 rtnl_lock();
d1a890fa 3361 if (netif_running(adapter->netdev)) {
204a6e65 3362 netdev_notice(adapter->netdev, "resetting\n");
d1a890fa
SB
3363 vmxnet3_quiesce_dev(adapter);
3364 vmxnet3_reset_dev(adapter);
3365 vmxnet3_activate_dev(adapter);
3366 } else {
204a6e65 3367 netdev_info(adapter->netdev, "already closed\n");
d1a890fa 3368 }
d9a5f210 3369 rtnl_unlock();
d1a890fa 3370
277964e1 3371 netif_wake_queue(adapter->netdev);
d1a890fa
SB
3372 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3373}
3374
3375
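/*
 * PCI probe: allocate the netdev and the shared, queue, power-management,
 * RSS and coalescing areas, map the BARs, negotiate the device revision and
 * the number of tx/rx queues, declare the netdev features, set up interrupt
 * resources and read the permanent MAC address.
 */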
3a4751a3 3376static int
d1a890fa
SB
3377vmxnet3_probe_device(struct pci_dev *pdev,
3378 const struct pci_device_id *id)
3379{
3380 static const struct net_device_ops vmxnet3_netdev_ops = {
3381 .ndo_open = vmxnet3_open,
3382 .ndo_stop = vmxnet3_close,
3383 .ndo_start_xmit = vmxnet3_xmit_frame,
3384 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3385 .ndo_change_mtu = vmxnet3_change_mtu,
3dd7400b 3386 .ndo_fix_features = vmxnet3_fix_features,
a0d2730c 3387 .ndo_set_features = vmxnet3_set_features,
1dac3b1b 3388 .ndo_features_check = vmxnet3_features_check,
95305f6c 3389 .ndo_get_stats64 = vmxnet3_get_stats64,
d1a890fa 3390 .ndo_tx_timeout = vmxnet3_tx_timeout,
afc4b13d 3391 .ndo_set_rx_mode = vmxnet3_set_mc,
d1a890fa
SB
3392 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3393 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3394#ifdef CONFIG_NET_POLL_CONTROLLER
3395 .ndo_poll_controller = vmxnet3_netpoll,
3396#endif
3397 };
3398 int err;
61aeecea 3399 bool dma64;
d1a890fa
SB
3400 u32 ver;
3401 struct net_device *netdev;
3402 struct vmxnet3_adapter *adapter;
3403 u8 mac[ETH_ALEN];
09c5088e
SB
3404 int size;
3405 int num_tx_queues;
3406 int num_rx_queues;
39f9895a
RD
3407 int queues;
3408 unsigned long flags;
09c5088e 3409
e154b639
SB
3410 if (!pci_msi_enabled())
3411 enable_mq = 0;
3412
09c5088e
SB
3413#ifdef VMXNET3_RSS
3414 if (enable_mq)
3415 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3416 (int)num_online_cpus());
3417 else
3418#endif
3419 num_rx_queues = 1;
3420
3421 if (enable_mq)
3422 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3423 (int)num_online_cpus());
3424 else
3425 num_tx_queues = 1;
3426
3427 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3428 max(num_tx_queues, num_rx_queues));
41de8d4c 3429 if (!netdev)
d1a890fa 3430 return -ENOMEM;
d1a890fa
SB
3431
3432 pci_set_drvdata(pdev, netdev);
3433 adapter = netdev_priv(netdev);
3434 adapter->netdev = netdev;
3435 adapter->pdev = pdev;
3436
f00e2b0a
NH
3437 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3438 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
53831aa1 3439 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
f00e2b0a 3440
bf7bec46 3441 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
61aeecea 3442 dma64 = true;
3443 } else {
bf7bec46
CJ
3444 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3445 if (err) {
3446 dev_err(&pdev->dev, "dma_set_mask failed\n");
61aeecea 3447 goto err_set_mask;
3448 }
3449 dma64 = false;
3450 }
3451
83d0feff 3452 spin_lock_init(&adapter->cmd_lock);
b0eb57cb
AK
3453 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3454 sizeof(struct vmxnet3_adapter),
bf7bec46 3455 DMA_TO_DEVICE);
5738a09d
AK
3456 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3457 dev_err(&pdev->dev, "Failed to map dma\n");
3458 err = -EFAULT;
61aeecea 3459 goto err_set_mask;
5738a09d 3460 }
b0eb57cb
AK
3461 adapter->shared = dma_alloc_coherent(
3462 &adapter->pdev->dev,
3463 sizeof(struct Vmxnet3_DriverShared),
3464 &adapter->shared_pa, GFP_KERNEL);
d1a890fa 3465 if (!adapter->shared) {
204a6e65 3466 dev_err(&pdev->dev, "Failed to allocate memory\n");
d1a890fa
SB
3467 err = -ENOMEM;
3468 goto err_alloc_shared;
3469 }
3470
61aeecea 3471 err = vmxnet3_alloc_pci_resources(adapter);
d1a890fa
SB
3472 if (err < 0)
3473 goto err_alloc_pci;
3474
3475 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
ce2639ad
RD
3476 if (ver & (1 << VMXNET3_REV_6)) {
3477 VMXNET3_WRITE_BAR1_REG(adapter,
3478 VMXNET3_REG_VRRS,
3479 1 << VMXNET3_REV_6);
3480 adapter->version = VMXNET3_REV_6 + 1;
3481 } else if (ver & (1 << VMXNET3_REV_5)) {
3482 VMXNET3_WRITE_BAR1_REG(adapter,
3483 VMXNET3_REG_VRRS,
3484 1 << VMXNET3_REV_5);
3485 adapter->version = VMXNET3_REV_5 + 1;
3486 } else if (ver & (1 << VMXNET3_REV_4)) {
a31135e3
RD
3487 VMXNET3_WRITE_BAR1_REG(adapter,
3488 VMXNET3_REG_VRRS,
3489 1 << VMXNET3_REV_4);
3490 adapter->version = VMXNET3_REV_4 + 1;
3491 } else if (ver & (1 << VMXNET3_REV_3)) {
6af9d787
SK
3492 VMXNET3_WRITE_BAR1_REG(adapter,
3493 VMXNET3_REG_VRRS,
3494 1 << VMXNET3_REV_3);
3495 adapter->version = VMXNET3_REV_3 + 1;
3496 } else if (ver & (1 << VMXNET3_REV_2)) {
190af10f
SK
3497 VMXNET3_WRITE_BAR1_REG(adapter,
3498 VMXNET3_REG_VRRS,
3499 1 << VMXNET3_REV_2);
3500 adapter->version = VMXNET3_REV_2 + 1;
3501 } else if (ver & (1 << VMXNET3_REV_1)) {
3502 VMXNET3_WRITE_BAR1_REG(adapter,
3503 VMXNET3_REG_VRRS,
3504 1 << VMXNET3_REV_1);
3505 adapter->version = VMXNET3_REV_1 + 1;
d1a890fa 3506 } else {
204a6e65
SH
3507 dev_err(&pdev->dev,
3508 "Incompatible h/w version (0x%x) for adapter\n", ver);
d1a890fa
SB
3509 err = -EBUSY;
3510 goto err_ver;
3511 }
45dac1d6 3512 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
d1a890fa
SB
3513
3514 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3515 if (ver & 1) {
3516 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3517 } else {
204a6e65
SH
3518 dev_err(&pdev->dev,
3519 "Incompatible upt version (0x%x) for adapter\n", ver);
d1a890fa
SB
3520 err = -EBUSY;
3521 goto err_ver;
3522 }
3523
39f9895a
RD
3524 if (VMXNET3_VERSION_GE_6(adapter)) {
3525 spin_lock_irqsave(&adapter->cmd_lock, flags);
3526 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3527 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3528 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3529 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3530 if (queues > 0) {
3531 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3532 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3533 } else {
3534 adapter->num_rx_queues = min(num_rx_queues,
3535 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3536 adapter->num_tx_queues = min(num_tx_queues,
3537 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3538 }
3539 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3540 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3541 adapter->queuesExtEnabled = true;
3542 } else {
3543 adapter->queuesExtEnabled = false;
3544 }
3545 } else {
3546 adapter->queuesExtEnabled = false;
15ccf2f4
RD
3547 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3548 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
39f9895a
RD
3549 adapter->num_rx_queues = min(num_rx_queues,
3550 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3551 adapter->num_tx_queues = min(num_tx_queues,
3552 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3553 }
3554 dev_info(&pdev->dev,
3555 "# of Tx queues : %d, # of Rx queues : %d\n",
3556 adapter->num_tx_queues, adapter->num_rx_queues);
3557
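/* All Tx queue descriptors followed by all Rx queue descriptors are carved
 * out of a single coherent allocation; rqd_start simply points past the Tx
 * descriptors within that block.
 */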
3558 adapter->rx_buf_per_pkt = 1;
3559
3560 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3561 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3562 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3563 &adapter->queue_desc_pa,
3564 GFP_KERNEL);
3565
3566 if (!adapter->tqd_start) {
3567 dev_err(&pdev->dev, "Failed to allocate memory\n");
3568 err = -ENOMEM;
3569 goto err_ver;
3570 }
3571 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3572 adapter->num_tx_queues);
3573
3574 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3575 sizeof(struct Vmxnet3_PMConf),
3576 &adapter->pm_conf_pa,
3577 GFP_KERNEL);
3578 if (adapter->pm_conf == NULL) {
3579 err = -ENOMEM;
3580 goto err_alloc_pm;
3581 }
3582
3583#ifdef VMXNET3_RSS
3584
3585 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3586 sizeof(struct UPT1_RSSConf),
3587 &adapter->rss_conf_pa,
3588 GFP_KERNEL);
3589 if (adapter->rss_conf == NULL) {
3590 err = -ENOMEM;
3591 goto err_alloc_rss;
3592 }
3593#endif /* VMXNET3_RSS */
3594
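/* Version 3+ devices support configurable interrupt coalescing; the scheme
 * lives in its own coherent buffer and starts out disabled.
 */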
4edef40e
SK
3595 if (VMXNET3_VERSION_GE_3(adapter)) {
3596 adapter->coal_conf =
3597 dma_alloc_coherent(&adapter->pdev->dev,
 3598 sizeof(struct Vmxnet3_CoalesceScheme),
 3600 &adapter->coal_conf_pa,
 3601 GFP_KERNEL);
3602 if (!adapter->coal_conf) {
3603 err = -ENOMEM;
39f9895a 3604 goto err_coal_conf;
4edef40e 3605 }
4edef40e
SK
3606 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3607 adapter->default_coal_mode = true;
3608 }
3609
d3a8a9e5
RD
3610 if (VMXNET3_VERSION_GE_4(adapter)) {
3611 adapter->default_rss_fields = true;
3612 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3613 }
3614
e101e7dd 3615 SET_NETDEV_DEV(netdev, &pdev->dev);
d1a890fa
SB
3616 vmxnet3_declare_features(adapter, dma64);
3617
50a5ce3e
SK
3618 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3619 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3620
4db37a78
SH
3621 if (adapter->num_tx_queues == adapter->num_rx_queues)
3622 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3623 else
09c5088e
SB
3624 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3625
d1a890fa
SB
3626 vmxnet3_alloc_intr_resources(adapter);
3627
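/* RSS is enabled only with multiple Rx queues and MSI-X interrupts,
 * presumably so that each Rx queue can be driven by its own vector.
 */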
09c5088e
SB
3628#ifdef VMXNET3_RSS
3629 if (adapter->num_rx_queues > 1 &&
3630 adapter->intr.type == VMXNET3_IT_MSIX) {
3631 adapter->rss = true;
7db11f75
SH
3632 netdev->hw_features |= NETIF_F_RXHASH;
3633 netdev->features |= NETIF_F_RXHASH;
204a6e65 3634 dev_dbg(&pdev->dev, "RSS is enabled.\n");
09c5088e
SB
3635 } else {
3636 adapter->rss = false;
3637 }
3638#endif
3639
d1a890fa
SB
3640 vmxnet3_read_mac_addr(adapter, mac);
3641 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3642
3643 netdev->netdev_ops = &vmxnet3_netdev_ops;
d1a890fa 3644 vmxnet3_set_ethtool_ops(netdev);
09c5088e 3645 netdev->watchdog_timeo = 5 * HZ;
d1a890fa 3646
8c5663e4 3647 /* MTU range: 60 - 9190 for version 6+ devices, 60 - 9000 otherwise */
d0c2c997 3648 netdev->min_mtu = VMXNET3_MIN_MTU;
8c5663e4
RD
3649 if (VMXNET3_VERSION_GE_6(adapter))
3650 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
3651 else
3652 netdev->max_mtu = VMXNET3_MAX_MTU;
d0c2c997 3653
d1a890fa 3654 INIT_WORK(&adapter->work, vmxnet3_reset_work);
e3bc4ffb 3655 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
d1a890fa 3656
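/* With MSI-X each Rx queue gets its own NAPI context that polls only that
 * queue; otherwise a single NAPI context services the whole device.
 */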
09c5088e
SB
3657 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3658 int i;
3659 for (i = 0; i < adapter->num_rx_queues; i++) {
3660 netif_napi_add(adapter->netdev,
3661 &adapter->rx_queue[i].napi,
3662 vmxnet3_poll_rx_only, 64);
3663 }
3664 } else {
3665 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3666 vmxnet3_poll, 64);
3667 }
3668
3669 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3670 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3671
6cdd20c3 3672 netif_carrier_off(netdev);
d1a890fa
SB
3673 err = register_netdev(netdev);
3674
3675 if (err) {
204a6e65 3676 dev_err(&pdev->dev, "Failed to register adapter\n");
d1a890fa
SB
3677 goto err_register;
3678 }
3679
4a1745fc 3680 vmxnet3_check_link(adapter, false);
d1a890fa
SB
3681 return 0;
3682
3683err_register:
4edef40e
SK
3684 if (VMXNET3_VERSION_GE_3(adapter)) {
3685 dma_free_coherent(&adapter->pdev->dev,
3686 sizeof(struct Vmxnet3_CoalesceScheme),
3687 adapter->coal_conf, adapter->coal_conf_pa);
3688 }
d1a890fa 3689 vmxnet3_free_intr_resources(adapter);
39f9895a 3690err_coal_conf:
09c5088e 3691#ifdef VMXNET3_RSS
b0eb57cb
AK
3692 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3693 adapter->rss_conf, adapter->rss_conf_pa);
09c5088e
SB
3694err_alloc_rss:
3695#endif
b0eb57cb
AK
3696 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3697 adapter->pm_conf, adapter->pm_conf_pa);
d1a890fa 3698err_alloc_pm:
b0eb57cb
AK
3699 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3700 adapter->queue_desc_pa);
39f9895a
RD
3701err_ver:
3702 vmxnet3_free_pci_resources(adapter);
3703err_alloc_pci:
b0eb57cb
AK
3704 dma_free_coherent(&adapter->pdev->dev,
3705 sizeof(struct Vmxnet3_DriverShared),
3706 adapter->shared, adapter->shared_pa);
d1a890fa 3707err_alloc_shared:
b0eb57cb 3708 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
bf7bec46 3709 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
61aeecea 3710err_set_mask:
d1a890fa
SB
3711 free_netdev(netdev);
3712 return err;
3713}
3714
3715
3a4751a3 3716static void
d1a890fa
SB
3717vmxnet3_remove_device(struct pci_dev *pdev)
3718{
3719 struct net_device *netdev = pci_get_drvdata(pdev);
3720 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
09c5088e 3721 int size = 0;
39f9895a
RD
3722 int num_rx_queues, rx_queues;
3723 unsigned long flags;
09c5088e
SB
3724
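/* Recompute the Rx queue count the same way probe did so that the queue
 * descriptor area below is freed with the size it was allocated with.
 */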
3725#ifdef VMXNET3_RSS
3726 if (enable_mq)
3727 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3728 (int)num_online_cpus());
3729 else
3730#endif
3731 num_rx_queues = 1;
15ccf2f4
RD
3732 if (!VMXNET3_VERSION_GE_6(adapter)) {
3733 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3734 }
39f9895a
RD
3735 if (VMXNET3_VERSION_GE_6(adapter)) {
3736 spin_lock_irqsave(&adapter->cmd_lock, flags);
3737 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3738 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3739 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3740 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3741 if (rx_queues > 0)
3742 rx_queues = (rx_queues >> 8) & 0xff;
3743 else
3744 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3745 num_rx_queues = min(num_rx_queues, rx_queues);
3746 } else {
3747 num_rx_queues = min(num_rx_queues,
3748 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3749 }
d1a890fa 3750
23f333a2 3751 cancel_work_sync(&adapter->work);
d1a890fa
SB
3752
3753 unregister_netdev(netdev);
3754
3755 vmxnet3_free_intr_resources(adapter);
3756 vmxnet3_free_pci_resources(adapter);
4edef40e
SK
3757 if (VMXNET3_VERSION_GE_3(adapter)) {
3758 dma_free_coherent(&adapter->pdev->dev,
3759 sizeof(struct Vmxnet3_CoalesceScheme),
3760 adapter->coal_conf, adapter->coal_conf_pa);
3761 }
09c5088e 3762#ifdef VMXNET3_RSS
b0eb57cb
AK
3763 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3764 adapter->rss_conf, adapter->rss_conf_pa);
09c5088e 3765#endif
b0eb57cb
AK
3766 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3767 adapter->pm_conf, adapter->pm_conf_pa);
09c5088e
SB
3768
3769 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3770 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
b0eb57cb
AK
3771 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3772 adapter->queue_desc_pa);
3773 dma_free_coherent(&adapter->pdev->dev,
3774 sizeof(struct Vmxnet3_DriverShared),
3775 adapter->shared, adapter->shared_pa);
3776 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
bf7bec46 3777 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
d1a890fa
SB
3778 free_netdev(netdev);
3779}
3780
e9ba47bf
SB
3781static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3782{
3783 struct net_device *netdev = pci_get_drvdata(pdev);
3784 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3785 unsigned long flags;
3786
 3787 /* Reset_work may be in the middle of resetting the device; wait for it
 3788 * to complete.
 3789 */
3790 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
93c65d13 3791 usleep_range(1000, 2000);
e9ba47bf
SB
3792
3793 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3794 &adapter->state)) {
3795 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3796 return;
3797 }
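/* Tell the device to quiesce and mask all interrupts so it stops generating
 * work before the machine goes down.
 */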
3798 spin_lock_irqsave(&adapter->cmd_lock, flags);
3799 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3800 VMXNET3_CMD_QUIESCE_DEV);
3801 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3802 vmxnet3_disable_all_intrs(adapter);
3803
3804 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3805}
3806
d1a890fa
SB
3807
3808#ifdef CONFIG_PM
3809
3810static int
3811vmxnet3_suspend(struct device *device)
3812{
3813 struct pci_dev *pdev = to_pci_dev(device);
3814 struct net_device *netdev = pci_get_drvdata(pdev);
3815 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3816 struct Vmxnet3_PMConf *pmConf;
3817 struct ethhdr *ehdr;
3818 struct arphdr *ahdr;
3819 u8 *arpreq;
3820 struct in_device *in_dev;
3821 struct in_ifaddr *ifa;
83d0feff 3822 unsigned long flags;
d1a890fa
SB
3823 int i = 0;
3824
3825 if (!netif_running(netdev))
3826 return 0;
3827
51956cd6
SB
3828 for (i = 0; i < adapter->num_rx_queues; i++)
3829 napi_disable(&adapter->rx_queue[i].napi);
3830
d1a890fa
SB
3831 vmxnet3_disable_all_intrs(adapter);
3832 vmxnet3_free_irqs(adapter);
3833 vmxnet3_free_intr_resources(adapter);
3834
3835 netif_device_detach(netdev);
d1a890fa
SB
3836
3837 /* Create wake-up filters. */
3838 pmConf = adapter->pm_conf;
3839 memset(pmConf, 0, sizeof(*pmConf));
3840
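/* Unicast wake-up: the pattern is the interface MAC at the start of the
 * frame.  Each mask bit appears to select one byte of the pattern, so 0x3F
 * covers the six destination-address bytes.
 */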
3841 if (adapter->wol & WAKE_UCAST) {
3842 pmConf->filters[i].patternSize = ETH_ALEN;
3843 pmConf->filters[i].maskSize = 1;
3844 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3845 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3846
3843e515 3847 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
d1a890fa
SB
3848 i++;
3849 }
3850
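/* ARP wake-up: build a pattern that matches an ARP request for this
 * interface's primary IPv4 address, with mask bytes selecting only the
 * EtherType, ARP opcode and target-IP fields.
 */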
3851 if (adapter->wol & WAKE_ARP) {
2638eb8b
FW
3852 rcu_read_lock();
3853
3854 in_dev = __in_dev_get_rcu(netdev);
3855 if (!in_dev) {
3856 rcu_read_unlock();
d1a890fa 3857 goto skip_arp;
2638eb8b 3858 }
d1a890fa 3859
2638eb8b
FW
3860 ifa = rcu_dereference(in_dev->ifa_list);
3861 if (!ifa) {
3862 rcu_read_unlock();
d1a890fa 3863 goto skip_arp;
2638eb8b 3864 }
d1a890fa
SB
3865
3866 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3867 sizeof(struct arphdr) + /* ARP header */
3868 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3869 2 * sizeof(u32); /*2 IPv4 addresses */
3870 pmConf->filters[i].maskSize =
3871 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3872
3873 /* ETH_P_ARP in Ethernet header. */
3874 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3875 ehdr->h_proto = htons(ETH_P_ARP);
3876
3877 /* ARPOP_REQUEST in ARP header. */
3878 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3879 ahdr->ar_op = htons(ARPOP_REQUEST);
3880 arpreq = (u8 *)(ahdr + 1);
3881
3882 /* The Unicast IPv4 address in 'tip' field. */
3883 arpreq += 2 * ETH_ALEN + sizeof(u32);
2638eb8b
FW
3884 *(__be32 *)arpreq = ifa->ifa_address;
3885
3886 rcu_read_unlock();
d1a890fa
SB
3887
3888 /* The mask for the relevant bits. */
3889 pmConf->filters[i].mask[0] = 0x00;
3890 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3891 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3892 pmConf->filters[i].mask[3] = 0x00;
3893 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3894 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
d1a890fa 3895
3843e515 3896 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
d1a890fa
SB
3897 i++;
3898 }
3899
3900skip_arp:
3901 if (adapter->wol & WAKE_MAGIC)
3843e515 3902 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
d1a890fa
SB
3903
3904 pmConf->numFilters = i;
3905
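/* Publish the PM configuration through the shared area and ask the device to
 * reload it before putting the PCI device to sleep with wake-up enabled
 * according to adapter->wol.
 */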
115924b6
SB
3906 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3907 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3908 *pmConf));
b0eb57cb
AK
3909 adapter->shared->devRead.pmConfDesc.confPA =
3910 cpu_to_le64(adapter->pm_conf_pa);
d1a890fa 3911
83d0feff 3912 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa
SB
3913 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3914 VMXNET3_CMD_UPDATE_PMCFG);
83d0feff 3915 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
d1a890fa
SB
3916
3917 pci_save_state(pdev);
3918 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3919 adapter->wol);
3920 pci_disable_device(pdev);
3921 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3922
3923 return 0;
3924}
3925
3926
3927static int
3928vmxnet3_resume(struct device *device)
3929{
5ec82c1e 3930 int err;
83d0feff 3931 unsigned long flags;
d1a890fa
SB
3932 struct pci_dev *pdev = to_pci_dev(device);
3933 struct net_device *netdev = pci_get_drvdata(pdev);
3934 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
d1a890fa
SB
3935
3936 if (!netif_running(netdev))
3937 return 0;
3938
d1a890fa
SB
3939 pci_set_power_state(pdev, PCI_D0);
3940 pci_restore_state(pdev);
3941 err = pci_enable_device_mem(pdev);
3942 if (err != 0)
3943 return err;
3944
3945 pci_enable_wake(pdev, PCI_D0, 0);
3946
5ec82c1e
SK
3947 vmxnet3_alloc_intr_resources(adapter);
3948
 3949 /* After hibernate or suspend the device must be reinitialized, since its
 3950 * state is not guaranteed to have been preserved.
 3951 */
3952
 3953 /* There is no need to check the adapter state here: other reset tasks
 3954 * cannot run while the device is resuming.
 3955 */
83d0feff 3956 spin_lock_irqsave(&adapter->cmd_lock, flags);
d1a890fa 3957 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
5ec82c1e 3958 VMXNET3_CMD_QUIESCE_DEV);
83d0feff 3959 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
5ec82c1e
SK
3960 vmxnet3_tq_cleanup_all(adapter);
3961 vmxnet3_rq_cleanup_all(adapter);
3962
3963 vmxnet3_reset_dev(adapter);
3964 err = vmxnet3_activate_dev(adapter);
3965 if (err != 0) {
3966 netdev_err(netdev,
3967 "failed to re-activate on resume, error: %d", err);
3968 vmxnet3_force_close(adapter);
3969 return err;
3970 }
3971 netif_device_attach(netdev);
d1a890fa
SB
3972
3973 return 0;
3974}
3975
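/* freeze/restore reuse the suspend/resume callbacks, so hibernation follows
 * the same quiesce-and-reinitialize path as suspend.
 */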
47145210 3976static const struct dev_pm_ops vmxnet3_pm_ops = {
d1a890fa
SB
3977 .suspend = vmxnet3_suspend,
3978 .resume = vmxnet3_resume,
5ec82c1e
SK
3979 .freeze = vmxnet3_suspend,
3980 .restore = vmxnet3_resume,
d1a890fa
SB
3981};
3982#endif
3983
3984static struct pci_driver vmxnet3_driver = {
3985 .name = vmxnet3_driver_name,
3986 .id_table = vmxnet3_pciid_table,
3987 .probe = vmxnet3_probe_device,
3a4751a3 3988 .remove = vmxnet3_remove_device,
e9ba47bf 3989 .shutdown = vmxnet3_shutdown_device,
d1a890fa
SB
3990#ifdef CONFIG_PM
3991 .driver.pm = &vmxnet3_pm_ops,
3992#endif
3993};
3994
3995
3996static int __init
3997vmxnet3_init_module(void)
3998{
204a6e65 3999 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
d1a890fa
SB
4000 VMXNET3_DRIVER_VERSION_REPORT);
4001 return pci_register_driver(&vmxnet3_driver);
4002}
4003
4004module_init(vmxnet3_init_module);
4005
4006
4007static void
4008vmxnet3_exit_module(void)
4009{
4010 pci_unregister_driver(&vmxnet3_driver);
4011}
4012
4013module_exit(vmxnet3_exit_module);
4014
4015MODULE_AUTHOR("VMware, Inc.");
4016MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4017MODULE_LICENSE("GPL v2");
4018MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);