2 * Linux driver for VMware's vmxnet3 ethernet NIC.
4 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
23 * Maintained by: pv-drivers@vmware.com
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
30 #include "vmxnet3_int.h"
31 #include "vmxnet3_xdp.h"
33 char vmxnet3_driver_name[] = "vmxnet3";
34 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
38 * Last entry must be all 0s
40 static const struct pci_device_id vmxnet3_pciid_table[] = {
41 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
45 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
47 static int enable_mq = 1;
50 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
53 * Enable/Disable the given intr
56 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
58 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
63 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
65 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
70 * Enable/Disable all intrs used by the device
73 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
77 for (i = 0; i < adapter->intr.num_intrs; i++)
78 vmxnet3_enable_intr(adapter, i);
79 if (!VMXNET3_VERSION_GE_6(adapter) ||
80 !adapter->queuesExtEnabled) {
81 adapter->shared->devRead.intrConf.intrCtrl &=
82 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
83 } else {
84 adapter->shared->devReadExt.intrConfExt.intrCtrl &=
85 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
91 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
95 if (!VMXNET3_VERSION_GE_6(adapter) ||
96 !adapter->queuesExtEnabled) {
97 adapter->shared->devRead.intrConf.intrCtrl |=
98 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
99 } else {
100 adapter->shared->devReadExt.intrConfExt.intrCtrl |=
101 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
103 for (i = 0; i < adapter->intr.num_intrs; i++)
104 vmxnet3_disable_intr(adapter, i);
109 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
111 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
116 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
123 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
131 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
139 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
146 /* Check whether the capability is supported by the UPT device or
147 * whether UPT is even requested
150 vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
152 if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
153 cap_supported & (1UL << cap)) {
162 * Check the link state. This may start or stop the tx queue.
165 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
171 spin_lock_irqsave(&adapter->cmd_lock, flags);
172 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
173 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
174 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
176 adapter->link_speed = ret >> 16;
177 if (ret & 1) { /* Link is up. */
178 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
179 adapter->link_speed);
180 netif_carrier_on(adapter->netdev);
183 for (i = 0; i < adapter->num_tx_queues; i++)
184 vmxnet3_tq_start(&adapter->tx_queue[i],
188 netdev_info(adapter->netdev, "NIC Link is Down\n");
189 netif_carrier_off(adapter->netdev);
192 for (i = 0; i < adapter->num_tx_queues; i++)
193 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
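/* Process events reported by the device in the ECR register: ack them,
 * re-check the link state on a link event, and on a tx/rx queue error
 * fetch the queue status and schedule the reset work item.
 */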
199 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
203 u32 events = le32_to_cpu(adapter->shared->ecr);
207 vmxnet3_ack_events(adapter, events);
209 /* Check if link state has changed */
210 if (events & VMXNET3_ECR_LINK)
211 vmxnet3_check_link(adapter, true);
213 /* Check if there is an error on xmit/recv queues */
214 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
215 spin_lock_irqsave(&adapter->cmd_lock, flags);
216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
217 VMXNET3_CMD_GET_QUEUE_STATUS);
218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
220 for (i = 0; i < adapter->num_tx_queues; i++)
221 if (adapter->tqd_start[i].status.stopped)
222 dev_err(&adapter->netdev->dev,
223 "%s: tq[%d] error 0x%x\n",
224 adapter->netdev->name, i, le32_to_cpu(
225 adapter->tqd_start[i].status.error));
226 for (i = 0; i < adapter->num_rx_queues; i++)
227 if (adapter->rqd_start[i].status.stopped)
228 dev_err(&adapter->netdev->dev,
229 "%s: rq[%d] error 0x%x\n",
230 adapter->netdev->name, i,
231 adapter->rqd_start[i].status.error);
233 schedule_work(&adapter->work);
237 #ifdef __BIG_ENDIAN_BITFIELD
239 * The device expects the bitfields in shared structures to be written in
240 * little endian. When the CPU is big endian, the following routines are used
241 * to correctly read from and write to the device ABI.
242 * The general technique used here is: double-word bitfields are defined in
243 * the opposite order for big endian architectures. Before the driver reads
244 * them, the complete double word is translated using le32_to_cpu. Similarly,
245 * after the driver writes into the bitfields, cpu_to_le32 is used to translate
246 * the double words into the required format.
247 * To avoid touching bits in a shared structure more than once, temporary
248 * descriptors are used. These are passed as srcDesc to the following functions.
250 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
251 struct Vmxnet3_RxDesc *dstDesc)
253 u32 *src = (u32 *)srcDesc + 2;
254 u32 *dst = (u32 *)dstDesc + 2;
255 dstDesc->addr = le64_to_cpu(srcDesc->addr);
256 *dst = le32_to_cpu(*src);
257 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
260 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
261 struct Vmxnet3_TxDesc *dstDesc)
264 u32 *src = (u32 *)(srcDesc + 1);
265 u32 *dst = (u32 *)(dstDesc + 1);
267 /* Working backwards so that the gen bit is set at the end. */
268 for (i = 2; i > 0; i--) {
271 *dst = cpu_to_le32(*src);
276 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
277 struct Vmxnet3_RxCompDesc *dstDesc)
280 u32 *src = (u32 *)srcDesc;
281 u32 *dst = (u32 *)dstDesc;
282 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
283 *dst = le32_to_cpu(*src);
290 /* Used to read bitfield values from double words. */
291 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
293 u32 temp = le32_to_cpu(*bitfield);
294 u32 mask = ((1 << size) - 1) << pos;
302 #endif /* __BIG_ENDIAN_BITFIELD */
304 #ifdef __BIG_ENDIAN_BITFIELD
306 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
307 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
308 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
309 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
310 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
311 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
312 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
313 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
314 VMXNET3_TCD_GEN_SIZE)
315 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
316 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
317 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
319 vmxnet3_RxCompToCPU((rcd), (tmp)); \
321 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
323 vmxnet3_RxDescToCPU((rxd), (tmp)); \
328 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
329 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
330 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
331 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
332 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
333 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
335 #endif /* __BIG_ENDIAN_BITFIELD */
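/* Unmap the DMA mapping of a single tx buffer, if it has one. Buffers that
 * came from the XDP page pool carry no single/page mapping and are skipped.
 */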
339 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
340 struct pci_dev *pdev)
342 u32 map_type = tbi->map_type;
344 if (map_type & VMXNET3_MAP_SINGLE)
345 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
347 else if (map_type & VMXNET3_MAP_PAGE)
348 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
351 BUG_ON(map_type & ~VMXNET3_MAP_XDP);
353 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
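/* Reclaim all buffers of one transmitted packet, from its SOP descriptor up
 * to and including eop_idx: unmap them and free the skb or XDP frame.
 * Returns the number of tx descriptors released back to the ring.
 */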
358 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
359 struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
360 struct xdp_frame_bulk *bq)
362 struct vmxnet3_tx_buf_info *tbi;
366 /* no out of order completion */
367 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
368 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
370 tbi = &tq->buf_info[eop_idx];
372 map_type = tbi->map_type;
373 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
375 while (tq->tx_ring.next2comp != eop_idx) {
376 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
379 /* update next2comp without holding tx_lock. Since we are marking
380 * more, not fewer, tx ring entries as available, the worst case is
381 * that the tx routine incorrectly re-queues a pkt due to
382 * insufficient tx ring entries.
384 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
388 if (map_type & VMXNET3_MAP_XDP)
389 xdp_return_frame_bulk(tbi->xdpf, bq);
391 dev_kfree_skb_any(tbi->skb);
393 /* xdpf and skb are in an anonymous union. */
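/* Walk the tx completion ring and reclaim descriptors for every packet the
 * device has finished transmitting; wake the tx queue if it was stopped and
 * enough ring entries have become available again.
 */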
401 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
402 struct vmxnet3_adapter *adapter)
404 union Vmxnet3_GenericDesc *gdesc;
405 struct xdp_frame_bulk bq;
408 xdp_frame_bulk_init(&bq);
411 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
412 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
413 /* Prevent any &gdesc->tcd field from being (speculatively)
414 * read before (&gdesc->tcd)->gen is read.
418 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
419 &gdesc->tcd), tq, adapter->pdev,
422 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
423 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
425 xdp_flush_frame_bulk(&bq);
429 spin_lock(&tq->tx_lock);
430 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
431 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
432 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
433 netif_carrier_ok(adapter->netdev))) {
434 vmxnet3_tq_wake(tq, adapter);
436 spin_unlock(&tq->tx_lock);
443 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
444 struct vmxnet3_adapter *adapter)
446 struct xdp_frame_bulk bq;
450 xdp_frame_bulk_init(&bq);
453 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
454 struct vmxnet3_tx_buf_info *tbi;
456 tbi = tq->buf_info + tq->tx_ring.next2comp;
457 map_type = tbi->map_type;
459 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
461 if (map_type & VMXNET3_MAP_XDP)
462 xdp_return_frame_bulk(tbi->xdpf, &bq);
464 dev_kfree_skb_any(tbi->skb);
467 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
470 xdp_flush_frame_bulk(&bq);
473 /* sanity check, verify all buffers are indeed unmapped */
474 for (i = 0; i < tq->tx_ring.size; i++)
475 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
477 tq->tx_ring.gen = VMXNET3_INIT_GEN;
478 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480 tq->comp_ring.gen = VMXNET3_INIT_GEN;
481 tq->comp_ring.next2proc = 0;
486 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
487 struct vmxnet3_adapter *adapter)
489 if (tq->tx_ring.base) {
490 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
491 sizeof(struct Vmxnet3_TxDesc),
492 tq->tx_ring.base, tq->tx_ring.basePA);
493 tq->tx_ring.base = NULL;
495 if (tq->data_ring.base) {
496 dma_free_coherent(&adapter->pdev->dev,
497 tq->data_ring.size * tq->txdata_desc_size,
498 tq->data_ring.base, tq->data_ring.basePA);
499 tq->data_ring.base = NULL;
501 if (tq->comp_ring.base) {
502 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
503 sizeof(struct Vmxnet3_TxCompDesc),
504 tq->comp_ring.base, tq->comp_ring.basePA);
505 tq->comp_ring.base = NULL;
512 /* Destroy all tx queues */
514 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
518 for (i = 0; i < adapter->num_tx_queues; i++)
519 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
524 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
525 struct vmxnet3_adapter *adapter)
529 /* reset the tx ring contents to 0 and reset the tx ring states */
530 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
531 sizeof(struct Vmxnet3_TxDesc));
532 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
533 tq->tx_ring.gen = VMXNET3_INIT_GEN;
535 memset(tq->data_ring.base, 0,
536 tq->data_ring.size * tq->txdata_desc_size);
538 /* reset the tx comp ring contents to 0 and reset comp ring states */
539 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
540 sizeof(struct Vmxnet3_TxCompDesc));
541 tq->comp_ring.next2proc = 0;
542 tq->comp_ring.gen = VMXNET3_INIT_GEN;
544 /* reset the bookkeeping data */
545 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
546 for (i = 0; i < tq->tx_ring.size; i++)
547 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
549 /* stats are not reset */
554 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
555 struct vmxnet3_adapter *adapter)
557 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
558 tq->comp_ring.base || tq->buf_info);
560 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
561 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
562 &tq->tx_ring.basePA, GFP_KERNEL);
563 if (!tq->tx_ring.base) {
564 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
568 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
569 tq->data_ring.size * tq->txdata_desc_size,
570 &tq->data_ring.basePA, GFP_KERNEL);
571 if (!tq->data_ring.base) {
572 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
576 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
577 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
578 &tq->comp_ring.basePA, GFP_KERNEL);
579 if (!tq->comp_ring.base) {
580 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
584 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
586 dev_to_node(&adapter->pdev->dev));
593 vmxnet3_tq_destroy(tq, adapter);
598 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
602 for (i = 0; i < adapter->num_tx_queues; i++)
603 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
607 * Starting from ring->next2fill, allocate rx buffers for the given ring
608 * of the rx queue and update the rx descriptors. Stop after @num_to_alloc
609 * buffers are allocated or an allocation fails.
613 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
614 int num_to_alloc, struct vmxnet3_adapter *adapter)
616 int num_allocated = 0;
617 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
618 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
621 while (num_allocated <= num_to_alloc) {
622 struct vmxnet3_rx_buf_info *rbi;
623 union Vmxnet3_GenericDesc *gd;
625 rbi = rbi_base + ring->next2fill;
626 gd = ring->base + ring->next2fill;
627 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
629 if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
630 void *data = vmxnet3_pp_get_buff(rq->page_pool,
634 rq->stats.rx_buf_alloc_failure++;
637 rbi->page = virt_to_page(data);
638 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
639 } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
640 if (rbi->skb == NULL) {
641 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
644 if (unlikely(rbi->skb == NULL)) {
645 rq->stats.rx_buf_alloc_failure++;
649 rbi->dma_addr = dma_map_single(
651 rbi->skb->data, rbi->len,
653 if (dma_mapping_error(&adapter->pdev->dev,
655 dev_kfree_skb_any(rbi->skb);
657 rq->stats.rx_buf_alloc_failure++;
661 /* rx buffer skipped by the device */
663 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
665 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
666 rbi->len != PAGE_SIZE);
668 if (rbi->page == NULL) {
669 rbi->page = alloc_page(GFP_ATOMIC);
670 if (unlikely(rbi->page == NULL)) {
671 rq->stats.rx_buf_alloc_failure++;
674 rbi->dma_addr = dma_map_page(
676 rbi->page, 0, PAGE_SIZE,
678 if (dma_mapping_error(&adapter->pdev->dev,
682 rq->stats.rx_buf_alloc_failure++;
686 /* rx buffers skipped by the device */
688 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
691 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
692 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
695 /* Fill the last buffer but don't mark it ready, or else the
696 * device will think that the queue is full */
697 if (num_allocated == num_to_alloc) {
698 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
702 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
704 vmxnet3_cmd_ring_adv_next2fill(ring);
707 netdev_dbg(adapter->netdev,
708 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
709 num_allocated, ring->next2fill, ring->next2comp);
711 /* so that the device can distinguish a full ring from an empty ring */
712 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
714 return num_allocated;
719 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
720 struct vmxnet3_rx_buf_info *rbi)
722 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
724 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
726 skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
727 skb->data_len += rcd->len;
728 skb->truesize += PAGE_SIZE;
729 skb_shinfo(skb)->nr_frags++;
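/* Map the skb's linear part and page fragments into tx descriptors. The SOP
 * descriptor is written with the inverted gen bit; the caller flips it last
 * so the device sees the packet only once all descriptors are filled.
 * Returns 0 on success or a negative error if a DMA mapping fails.
 */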
734 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
735 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
736 struct vmxnet3_adapter *adapter)
739 unsigned long buf_offset;
741 union Vmxnet3_GenericDesc *gdesc;
742 struct vmxnet3_tx_buf_info *tbi = NULL;
744 BUG_ON(ctx->copy_size > skb_headlen(skb));
746 /* use the previous gen bit for the SOP desc */
747 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
749 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
750 gdesc = ctx->sop_txd; /* both loops below can be skipped */
752 /* no need to map the buffer if headers are copied */
753 if (ctx->copy_size) {
754 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
755 tq->tx_ring.next2fill *
756 tq->txdata_desc_size);
757 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
758 ctx->sop_txd->dword[3] = 0;
760 tbi = tq->buf_info + tq->tx_ring.next2fill;
761 tbi->map_type = VMXNET3_MAP_NONE;
763 netdev_dbg(adapter->netdev,
764 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
765 tq->tx_ring.next2fill,
766 le64_to_cpu(ctx->sop_txd->txd.addr),
767 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
768 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
770 /* use the right gen for non-SOP desc */
771 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
774 /* linear part can use multiple tx desc if it's big */
775 len = skb_headlen(skb) - ctx->copy_size;
776 buf_offset = ctx->copy_size;
780 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
784 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
785 /* spec says that for TxDesc.len, 0 == 2^14 */
788 tbi = tq->buf_info + tq->tx_ring.next2fill;
789 tbi->map_type = VMXNET3_MAP_SINGLE;
790 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
791 skb->data + buf_offset, buf_size,
793 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
798 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
799 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
801 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
802 gdesc->dword[2] = cpu_to_le32(dw2);
805 netdev_dbg(adapter->netdev,
806 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
807 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
808 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
809 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
810 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
813 buf_offset += buf_size;
816 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
817 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
821 len = skb_frag_size(frag);
823 tbi = tq->buf_info + tq->tx_ring.next2fill;
824 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
828 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
829 /* spec says that for TxDesc.len, 0 == 2^14 */
831 tbi->map_type = VMXNET3_MAP_PAGE;
832 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
833 buf_offset, buf_size,
835 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
840 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
841 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
843 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
844 gdesc->dword[2] = cpu_to_le32(dw2);
847 netdev_dbg(adapter->netdev,
848 "txd[%u]: 0x%llx %u %u\n",
849 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
850 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
851 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
852 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
855 buf_offset += buf_size;
859 ctx->eop_txd = gdesc;
861 /* set the last buf_info for the pkt */
863 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
869 /* Init all tx queues */
871 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
875 for (i = 0; i < adapter->num_tx_queues; i++)
876 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
881 * parse relevant protocol headers:
882 * For a tso pkt, relevant headers are L2/3/4 including options
883 * For a pkt requesting csum offloading, they are L2/3 and may include L4
884 * if it's a TCP/UDP pkt
886 * Returns:
887 * -1: an error occurred during parsing
888 * 0: protocol headers parsed, but too big to be copied
889 * 1: protocol headers parsed and copied
891 * Other effects:
892 * 1. related *ctx fields are updated.
893 * 2. ctx->copy_size is the # of bytes copied
894 * 3. the portion to be copied is guaranteed to be in the linear part
898 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
899 struct vmxnet3_tx_ctx *ctx,
900 struct vmxnet3_adapter *adapter)
904 if (ctx->mss) { /* TSO */
905 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
906 ctx->l4_offset = skb_inner_transport_offset(skb);
907 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
908 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
910 ctx->l4_offset = skb_transport_offset(skb);
911 ctx->l4_hdr_size = tcp_hdrlen(skb);
912 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
915 if (skb->ip_summed == CHECKSUM_PARTIAL) {
916 /* For encap packets, skb_checksum_start_offset refers
917 * to inner L4 offset. Thus, below works for encap as
918 * well as non-encap case
920 ctx->l4_offset = skb_checksum_start_offset(skb);
922 if (VMXNET3_VERSION_GE_4(adapter) &&
923 skb->encapsulation) {
924 struct iphdr *iph = inner_ip_hdr(skb);
926 if (iph->version == 4) {
927 protocol = iph->protocol;
929 const struct ipv6hdr *ipv6h;
931 ipv6h = inner_ipv6_hdr(skb);
932 protocol = ipv6h->nexthdr;
936 const struct iphdr *iph = ip_hdr(skb);
938 protocol = iph->protocol;
939 } else if (ctx->ipv6) {
940 const struct ipv6hdr *ipv6h;
942 ipv6h = ipv6_hdr(skb);
943 protocol = ipv6h->nexthdr;
949 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
953 ctx->l4_hdr_size = sizeof(struct udphdr);
956 ctx->l4_hdr_size = 0;
960 ctx->copy_size = min(ctx->l4_offset +
961 ctx->l4_hdr_size, skb->len);
964 ctx->l4_hdr_size = 0;
965 /* copy as much as allowed */
966 ctx->copy_size = min_t(unsigned int,
967 tq->txdata_desc_size,
971 if (skb->len <= VMXNET3_HDR_COPY_SIZE)
972 ctx->copy_size = skb->len;
974 /* make sure headers are accessible directly */
975 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
979 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
980 tq->stats.oversized_hdr++;
991 * copy relevant protocol headers to the transmit ring:
992 * For a tso pkt, relevant headers are L2/3/4 including options
993 * For a pkt requesting csum offloading, they are L2/3 and may include L4
994 * if it's a TCP/UDP pkt
997 * Note that this requires that vmxnet3_parse_hdr be called first to set
998 * the appropriate bits in ctx
1001 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1002 struct vmxnet3_tx_ctx *ctx,
1003 struct vmxnet3_adapter *adapter)
1005 struct Vmxnet3_TxDataDesc *tdd;
1007 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
1008 tq->tx_ring.next2fill *
1009 tq->txdata_desc_size);
1011 memcpy(tdd->data, skb->data, ctx->copy_size);
1012 netdev_dbg(adapter->netdev,
1013 "copy %u bytes to dataRing[%u]\n",
1014 ctx->copy_size, tq->tx_ring.next2fill);
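/* For TSO, pre-compute the TCP pseudo-header checksum (with a zero length)
 * so that the device can finalize the checksum of each segment it emits.
 */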
1019 vmxnet3_prepare_inner_tso(struct sk_buff *skb,
1020 struct vmxnet3_tx_ctx *ctx)
1022 struct tcphdr *tcph = inner_tcp_hdr(skb);
1023 struct iphdr *iph = inner_ip_hdr(skb);
1025 if (iph->version == 4) {
1027 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1030 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
1032 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
1038 vmxnet3_prepare_tso(struct sk_buff *skb,
1039 struct vmxnet3_tx_ctx *ctx)
1041 struct tcphdr *tcph = tcp_hdr(skb);
1044 struct iphdr *iph = ip_hdr(skb);
1047 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1049 } else if (ctx->ipv6) {
1050 tcp_v6_gso_csum_prep(skb);
1054 static int txd_estimate(const struct sk_buff *skb)
1056 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1059 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1060 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1062 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1068 * Transmits a pkt thru a given tq
1069 * Returns:
1070 * NETDEV_TX_OK: descriptors are set up successfully
1071 * NETDEV_TX_OK: an error occurred, the pkt is dropped
1072 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
1074 * Side-effects:
1075 * 1. tx ring may be changed
1076 * 2. tq stats may be updated accordingly
1077 * 3. shared->txNumDeferred may be updated
1081 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1082 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1087 int tx_num_deferred;
1088 unsigned long flags;
1089 struct vmxnet3_tx_ctx ctx;
1090 union Vmxnet3_GenericDesc *gdesc;
1091 #ifdef __BIG_ENDIAN_BITFIELD
1092 /* Use temporary descriptor to avoid touching bits multiple times */
1093 union Vmxnet3_GenericDesc tempTxDesc;
1096 count = txd_estimate(skb);
1098 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1099 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1101 ctx.mss = skb_shinfo(skb)->gso_size;
1103 if (skb_header_cloned(skb)) {
1104 if (unlikely(pskb_expand_head(skb, 0, 0,
1105 GFP_ATOMIC) != 0)) {
1106 tq->stats.drop_tso++;
1109 tq->stats.copy_skb_header++;
1111 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1112 /* tso pkts must not use more than
1113 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1115 if (skb_linearize(skb) != 0) {
1116 tq->stats.drop_too_many_frags++;
1119 tq->stats.linearized++;
1121 /* recalculate the # of descriptors to use */
1122 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1123 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1124 tq->stats.drop_too_many_frags++;
1128 if (skb->encapsulation) {
1129 vmxnet3_prepare_inner_tso(skb, &ctx);
1131 vmxnet3_prepare_tso(skb, &ctx);
1134 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1136 /* non-tso pkts must not use more than
1137 * VMXNET3_MAX_TXD_PER_PKT entries
1139 if (skb_linearize(skb) != 0) {
1140 tq->stats.drop_too_many_frags++;
1143 tq->stats.linearized++;
1145 /* recalculate the # of descriptors to use */
1146 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1150 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1152 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1153 /* hdrs parsed, check against other limits */
1155 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1156 VMXNET3_MAX_TX_BUF_SIZE)) {
1157 tq->stats.drop_oversized_hdr++;
1161 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1162 if (unlikely(ctx.l4_offset +
1164 VMXNET3_MAX_CSUM_OFFSET)) {
1165 tq->stats.drop_oversized_hdr++;
1171 tq->stats.drop_hdr_inspect_err++;
1175 spin_lock_irqsave(&tq->tx_lock, flags);
1177 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1178 tq->stats.tx_ring_full++;
1179 netdev_dbg(adapter->netdev,
1180 "tx queue stopped on %s, next2comp %u"
1181 " next2fill %u\n", adapter->netdev->name,
1182 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1184 vmxnet3_tq_stop(tq, adapter);
1185 spin_unlock_irqrestore(&tq->tx_lock, flags);
1186 return NETDEV_TX_BUSY;
1190 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1192 /* fill tx descs related to addr & len */
1193 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1194 goto unlock_drop_pkt;
1196 /* setup the EOP desc */
1197 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1199 /* setup the SOP desc */
1200 #ifdef __BIG_ENDIAN_BITFIELD
1201 gdesc = &tempTxDesc;
1202 gdesc->dword[2] = ctx.sop_txd->dword[2];
1203 gdesc->dword[3] = ctx.sop_txd->dword[3];
1205 gdesc = ctx.sop_txd;
1207 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1209 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1210 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1211 if (VMXNET3_VERSION_GE_7(adapter)) {
1212 gdesc->txd.om = VMXNET3_OM_TSO;
1213 gdesc->txd.ext1 = 1;
1215 gdesc->txd.om = VMXNET3_OM_ENCAP;
1217 gdesc->txd.msscof = ctx.mss;
1219 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1222 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1223 gdesc->txd.om = VMXNET3_OM_TSO;
1224 gdesc->txd.msscof = ctx.mss;
1226 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1228 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1229 if (VMXNET3_VERSION_GE_4(adapter) &&
1230 skb->encapsulation) {
1231 gdesc->txd.hlen = ctx.l4_offset +
1233 if (VMXNET3_VERSION_GE_7(adapter)) {
1234 gdesc->txd.om = VMXNET3_OM_CSUM;
1235 gdesc->txd.msscof = ctx.l4_offset +
1237 gdesc->txd.ext1 = 1;
1239 gdesc->txd.om = VMXNET3_OM_ENCAP;
1240 gdesc->txd.msscof = 0; /* Reserved */
1243 gdesc->txd.hlen = ctx.l4_offset;
1244 gdesc->txd.om = VMXNET3_OM_CSUM;
1245 gdesc->txd.msscof = ctx.l4_offset +
1250 gdesc->txd.msscof = 0;
1254 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1255 tx_num_deferred += num_pkts;
1257 if (skb_vlan_tag_present(skb)) {
1259 gdesc->txd.tci = skb_vlan_tag_get(skb);
1262 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1263 * all other writes to &gdesc->txd.
1267 /* finally flips the GEN bit of the SOP desc. */
1268 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1270 #ifdef __BIG_ENDIAN_BITFIELD
1271 /* Finished updating in bitfields of Tx Desc, so write them in original
1274 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1275 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1276 gdesc = ctx.sop_txd;
1278 netdev_dbg(adapter->netdev,
1279 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1281 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1282 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1284 spin_unlock_irqrestore(&tq->tx_lock, flags);
1286 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1287 tq->shared->txNumDeferred = 0;
1288 VMXNET3_WRITE_BAR0_REG(adapter,
1289 adapter->tx_prod_offset + tq->qid * 8,
1290 tq->tx_ring.next2fill);
1293 return NETDEV_TX_OK;
1296 spin_unlock_irqrestore(&tq->tx_lock, flags);
1298 tq->stats.drop_total++;
1299 dev_kfree_skb_any(skb);
1300 return NETDEV_TX_OK;
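/* Create the page pool used for rx (and XDP) buffers of this rx queue and
 * register it as the memory model of rq->xdp_rxq. The pool DMA-maps pages;
 * the mapping direction depends on whether an XDP program is attached.
 */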
1304 vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
1305 struct vmxnet3_rx_queue *rq, int size)
1307 bool xdp_prog = vmxnet3_xdp_enabled(adapter);
1308 const struct page_pool_params pp_params = {
1310 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1312 .nid = NUMA_NO_NODE,
1313 .dev = &adapter->pdev->dev,
1314 .offset = VMXNET3_XDP_RX_OFFSET,
1315 .max_len = VMXNET3_XDP_MAX_FRSIZE,
1316 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
1318 struct page_pool *pp;
1321 pp = page_pool_create(&pp_params);
1325 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
1330 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
1332 goto err_unregister_rxq;
1339 xdp_rxq_info_unreg(&rq->xdp_rxq);
1341 page_pool_destroy(pp);
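/* Allocate one page from the page pool, store its DMA address (adjusted by
 * the pool's offset) in *dma_addr and return the page's virtual address.
 */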
1347 vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1352 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1353 if (unlikely(!page))
1356 *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
1358 return page_address(page);
1362 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1364 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1366 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1367 return vmxnet3_tq_xmit(skb,
1368 &adapter->tx_queue[skb->queue_mapping],
1369 adapter, netdev);
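/* Translate checksum results reported in the rx completion descriptor into
 * skb->ip_summed (and csum_level for encapsulated packets) for the stack.
 */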
1374 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1375 struct sk_buff *skb,
1376 union Vmxnet3_GenericDesc *gdesc)
1378 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1379 if (gdesc->rcd.v4 &&
1380 (le32_to_cpu(gdesc->dword[3]) &
1381 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1382 skb->ip_summed = CHECKSUM_UNNECESSARY;
1383 if ((le32_to_cpu(gdesc->dword[0]) &
1384 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1385 skb->csum_level = 1;
1387 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1388 !(le32_to_cpu(gdesc->dword[0]) &
1389 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1390 WARN_ON_ONCE(gdesc->rcd.frg &&
1391 !(le32_to_cpu(gdesc->dword[0]) &
1392 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1393 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1394 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1395 skb->ip_summed = CHECKSUM_UNNECESSARY;
1396 if ((le32_to_cpu(gdesc->dword[0]) &
1397 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1398 skb->csum_level = 1;
1400 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1401 !(le32_to_cpu(gdesc->dword[0]) &
1402 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1403 WARN_ON_ONCE(gdesc->rcd.frg &&
1404 !(le32_to_cpu(gdesc->dword[0]) &
1405 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1407 if (gdesc->rcd.csum) {
1408 skb->csum = htons(gdesc->rcd.csum);
1409 skb->ip_summed = CHECKSUM_PARTIAL;
1411 skb_checksum_none_assert(skb);
1415 skb_checksum_none_assert(skb);
1421 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1422 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1424 rq->stats.drop_err++;
1426 rq->stats.drop_fcs++;
1428 rq->stats.drop_total++;
1431 * We do not unmap and chain the rx buffer to the skb.
1432 * We basically pretend this buffer is not used and will be recycled
1433 * by vmxnet3_rq_alloc_rx_buf()
1437 * ctx->skb may be NULL if this is the first and the only one
1441 dev_kfree_skb_irq(ctx->skb);
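/* Estimate the total L2+L3+L4 header length of a received (LRO-coalesced)
 * packet so that gso_size can be derived when the device reports no MSS.
 */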
1448 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1449 union Vmxnet3_GenericDesc *gdesc)
1455 struct vlan_ethhdr *veth;
1457 struct ipv6hdr *ipv6;
1460 BUG_ON(gdesc->rcd.tcp == 0);
1462 maplen = skb_headlen(skb);
1463 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1466 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1467 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1468 hlen = sizeof(struct vlan_ethhdr);
1470 hlen = sizeof(struct ethhdr);
1472 hdr.eth = eth_hdr(skb);
1473 if (gdesc->rcd.v4) {
1474 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1475 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1477 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1478 hlen = hdr.ipv4->ihl << 2;
1479 hdr.ptr += hdr.ipv4->ihl << 2;
1480 } else if (gdesc->rcd.v6) {
1481 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1482 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1484 /* Use an estimated value, since we also need to handle
1487 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1488 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1489 hlen = sizeof(struct ipv6hdr);
1490 hdr.ptr += sizeof(struct ipv6hdr);
1492 /* Non-IP pkt, don't estimate header length */
1496 if (hlen + sizeof(struct tcphdr) > maplen)
1499 return (hlen + (hdr.tcp->doff << 2));
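/* Consume up to @quota rx completion descriptors: run XDP where enabled,
 * assemble skbs from the head buffer and page fragments, fill in checksum,
 * RSS and LRO metadata, pass them up the stack and refill the rx rings.
 * Returns the number of packets processed.
 */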
1503 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1504 struct vmxnet3_adapter *adapter, int quota)
1506 u32 rxprod_reg[2] = {
1507 adapter->rx_prod_offset, adapter->rx_prod2_offset
1510 bool skip_page_frags = false;
1511 bool encap_lro = false;
1512 struct Vmxnet3_RxCompDesc *rcd;
1513 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1514 u16 segCnt = 0, mss = 0;
1515 int comp_offset, fill_offset;
1516 #ifdef __BIG_ENDIAN_BITFIELD
1517 struct Vmxnet3_RxDesc rxCmdDesc;
1518 struct Vmxnet3_RxCompDesc rxComp;
1520 bool need_flush = false;
1522 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1524 while (rcd->gen == rq->comp_ring.gen) {
1525 struct vmxnet3_rx_buf_info *rbi;
1526 struct sk_buff *skb, *new_skb = NULL;
1527 struct page *new_page = NULL;
1528 dma_addr_t new_dma_addr;
1530 struct Vmxnet3_RxDesc *rxd;
1532 struct vmxnet3_cmd_ring *ring = NULL;
1533 if (num_pkts >= quota) {
1534 /* we may stop even before we see the EOP desc of
1540 /* Prevent any rcd field from being (speculatively) read before
1545 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1546 rcd->rqID != rq->dataRingQid);
1548 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1549 ring = rq->rx_ring + ring_idx;
1550 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1552 rbi = rq->buf_info[ring_idx] + idx;
1554 BUG_ON(rxd->addr != rbi->dma_addr ||
1555 rxd->len != rbi->len);
1557 if (unlikely(rcd->eop && rcd->err)) {
1558 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1562 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
1563 struct sk_buff *skb_xdp_pass;
1566 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
1568 goto skip_xdp; /* Handle it later. */
1571 if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
1574 act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
1576 if (act == XDP_PASS) {
1577 ctx->skb = skb_xdp_pass;
1581 need_flush |= act == XDP_REDIRECT;
1587 if (rcd->sop) { /* first buf of the pkt */
1588 bool rxDataRingUsed;
1591 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1592 (rcd->rqID != rq->qid &&
1593 rcd->rqID != rq->dataRingQid));
1595 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
1596 rbi->buf_type != VMXNET3_RX_BUF_XDP);
1597 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1599 if (unlikely(rcd->len == 0)) {
1600 /* Pretend the rx buffer is skipped. */
1601 BUG_ON(!(rcd->sop && rcd->eop));
1602 netdev_dbg(adapter->netdev,
1603 "rxRing[%u][%u] 0 length\n",
1608 skip_page_frags = false;
1609 ctx->skb = rbi->skb;
1612 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1613 len = rxDataRingUsed ? rcd->len : rbi->len;
1615 if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
1616 struct sk_buff *skb_xdp_pass;
1620 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1621 act = vmxnet3_process_xdp_small(adapter, rq,
1622 &rq->data_ring.base[sz],
1625 if (act == XDP_PASS) {
1626 ctx->skb = skb_xdp_pass;
1629 need_flush |= act == XDP_REDIRECT;
1633 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1635 if (new_skb == NULL) {
1636 /* Skb allocation failed, do not handover this
1637 * skb to stack. Reuse it. Drop the existing pkt
1639 rq->stats.rx_buf_alloc_failure++;
1641 rq->stats.drop_total++;
1642 skip_page_frags = true;
1646 if (rxDataRingUsed && adapter->rxdataring_enabled) {
1649 BUG_ON(rcd->len > rq->data_ring.desc_size);
1652 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1653 memcpy(new_skb->data,
1654 &rq->data_ring.base[sz], rcd->len);
1656 ctx->skb = rbi->skb;
1659 dma_map_single(&adapter->pdev->dev,
1660 new_skb->data, rbi->len,
1662 if (dma_mapping_error(&adapter->pdev->dev,
1664 dev_kfree_skb(new_skb);
1665 /* Skb allocation failed, do not
1666 * handover this skb to stack. Reuse
1667 * it. Drop the existing pkt.
1669 rq->stats.rx_buf_alloc_failure++;
1671 rq->stats.drop_total++;
1672 skip_page_frags = true;
1676 dma_unmap_single(&adapter->pdev->dev,
1681 /* Immediate refill */
1683 rbi->dma_addr = new_dma_addr;
1684 rxd->addr = cpu_to_le64(rbi->dma_addr);
1685 rxd->len = rbi->len;
1688 skb_record_rx_queue(ctx->skb, rq->qid);
1689 skb_put(ctx->skb, rcd->len);
1691 if (VMXNET3_VERSION_GE_2(adapter) &&
1692 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1693 struct Vmxnet3_RxCompDescExt *rcdlro;
1694 union Vmxnet3_GenericDesc *gdesc;
1696 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1697 gdesc = (union Vmxnet3_GenericDesc *)rcd;
1699 segCnt = rcdlro->segCnt;
1700 WARN_ON_ONCE(segCnt == 0);
1702 if (unlikely(segCnt <= 1))
1704 encap_lro = (le32_to_cpu(gdesc->dword[0]) &
1705 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1710 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1712 /* non SOP buffer must be type 1 in most cases */
1713 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1714 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1716 /* If an sop buffer was dropped, skip all
1717 * following non-sop fragments. They will be reused.
1719 if (skip_page_frags)
1723 new_page = alloc_page(GFP_ATOMIC);
1724 /* Replacement page frag could not be allocated.
1725 * Reuse this page. Drop the pkt and free the
1726 * skb which contained this page as a frag. Skip
1727 * processing all the following non-sop frags.
1729 if (unlikely(!new_page)) {
1730 rq->stats.rx_buf_alloc_failure++;
1731 dev_kfree_skb(ctx->skb);
1733 skip_page_frags = true;
1736 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1740 if (dma_mapping_error(&adapter->pdev->dev,
1743 rq->stats.rx_buf_alloc_failure++;
1744 dev_kfree_skb(ctx->skb);
1746 skip_page_frags = true;
1750 dma_unmap_page(&adapter->pdev->dev,
1751 rbi->dma_addr, rbi->len,
1754 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1756 /* Immediate refill */
1757 rbi->page = new_page;
1758 rbi->dma_addr = new_dma_addr;
1759 rxd->addr = cpu_to_le64(rbi->dma_addr);
1760 rxd->len = rbi->len;
1768 u32 mtu = adapter->netdev->mtu;
1769 skb->len += skb->data_len;
1772 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1773 (adapter->netdev->features & NETIF_F_RXHASH)) {
1774 enum pkt_hash_types hash_type;
1776 switch (rcd->rssType) {
1777 case VMXNET3_RCD_RSS_TYPE_IPV4:
1778 case VMXNET3_RCD_RSS_TYPE_IPV6:
1779 hash_type = PKT_HASH_TYPE_L3;
1781 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1782 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1783 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1784 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1785 hash_type = PKT_HASH_TYPE_L4;
1788 hash_type = PKT_HASH_TYPE_L3;
1792 le32_to_cpu(rcd->rssHash),
1796 vmxnet3_rx_csum(adapter, skb,
1797 (union Vmxnet3_GenericDesc *)rcd);
1798 skb->protocol = eth_type_trans(skb, adapter->netdev);
1799 if ((!rcd->tcp && !encap_lro) ||
1800 !(adapter->netdev->features & NETIF_F_LRO))
1803 if (segCnt != 0 && mss != 0) {
1804 skb_shinfo(skb)->gso_type = rcd->v4 ?
1805 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1806 skb_shinfo(skb)->gso_size = mss;
1807 skb_shinfo(skb)->gso_segs = segCnt;
1808 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1811 hlen = vmxnet3_get_hdr_len(adapter, skb,
1812 (union Vmxnet3_GenericDesc *)rcd);
1816 skb_shinfo(skb)->gso_type =
1817 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1819 skb_shinfo(skb)->gso_segs = segCnt;
1820 skb_shinfo(skb)->gso_size =
1821 DIV_ROUND_UP(skb->len -
1824 skb_shinfo(skb)->gso_size = mtu - hlen;
1828 if (unlikely(rcd->ts))
1829 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1831 /* Use GRO callback if UPT is enabled */
1832 if ((adapter->netdev->features & NETIF_F_LRO) &&
1833 !rq->shared->updateRxProd)
1834 netif_receive_skb(skb);
1835 else
1836 napi_gro_receive(&rq->napi, skb);
1844 /* device may have skipped some rx descs */
1845 ring = rq->rx_ring + ring_idx;
1846 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
1848 comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
1849 fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
1850 idx - ring->next2fill - 1;
1851 if (!ring->isOutOfOrder || fill_offset >= comp_offset)
1852 ring->next2comp = idx;
1853 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1855 /* Ensure that the writes to rxd->gen bits will be observed
1856 * after all other writes to rxd objects.
1860 while (num_to_alloc) {
1861 rbi = rq->buf_info[ring_idx] + ring->next2fill;
1862 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1864 if (ring_idx == 0) {
1865 /* ring0 Type1 buffers can get skipped; re-fill them */
1866 if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
1869 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
1871 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1873 WARN_ON(!rxd->addr);
1875 /* Recv desc is ready to be used by the device */
1876 rxd->gen = ring->gen;
1877 vmxnet3_cmd_ring_adv_next2fill(ring);
1878 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
1881 /* rx completion hasn't occurred */
1882 ring->isOutOfOrder = 1;
1887 if (num_to_alloc == 0) {
1888 ring->isOutOfOrder = 0;
1891 /* if needed, update the register */
1892 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1893 VMXNET3_WRITE_BAR0_REG(adapter,
1894 rxprod_reg[ring_idx] + rq->qid * 8,
1898 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1899 vmxnet3_getRxComp(rcd,
1900 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
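/* Release every rx buffer currently posted on both rings of the rx queue
 * (skb, page-pool page or plain page) and reset the ring state.
 */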
1910 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1911 struct vmxnet3_adapter *adapter)
1914 struct Vmxnet3_RxDesc *rxd;
1916 /* ring has already been cleaned up */
1917 if (!rq->rx_ring[0].base)
1920 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1921 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1922 struct vmxnet3_rx_buf_info *rbi;
1923 #ifdef __BIG_ENDIAN_BITFIELD
1924 struct Vmxnet3_RxDesc rxDesc;
1927 rbi = &rq->buf_info[ring_idx][i];
1928 vmxnet3_getRxDesc(rxd,
1929 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1931 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1932 rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
1933 page_pool_recycle_direct(rq->page_pool,
1936 } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1938 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1939 rxd->len, DMA_FROM_DEVICE);
1940 dev_kfree_skb(rbi->skb);
1942 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1944 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1945 rxd->len, DMA_FROM_DEVICE);
1946 put_page(rbi->page);
1951 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1952 rq->rx_ring[ring_idx].next2fill =
1953 rq->rx_ring[ring_idx].next2comp = 0;
1956 rq->comp_ring.gen = VMXNET3_INIT_GEN;
1957 rq->comp_ring.next2proc = 0;
1962 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1966 for (i = 0; i < adapter->num_rx_queues; i++)
1967 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1968 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
1972 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1973 struct vmxnet3_adapter *adapter)
1978 /* all rx buffers must have already been freed */
1979 for (i = 0; i < 2; i++) {
1980 if (rq->buf_info[i]) {
1981 for (j = 0; j < rq->rx_ring[i].size; j++)
1982 BUG_ON(rq->buf_info[i][j].page != NULL);
1987 for (i = 0; i < 2; i++) {
1988 if (rq->rx_ring[i].base) {
1989 dma_free_coherent(&adapter->pdev->dev,
1991 * sizeof(struct Vmxnet3_RxDesc),
1992 rq->rx_ring[i].base,
1993 rq->rx_ring[i].basePA);
1994 rq->rx_ring[i].base = NULL;
1998 if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
1999 xdp_rxq_info_unreg(&rq->xdp_rxq);
2000 page_pool_destroy(rq->page_pool);
2001 rq->page_pool = NULL;
2003 if (rq->data_ring.base) {
2004 dma_free_coherent(&adapter->pdev->dev,
2005 rq->rx_ring[0].size * rq->data_ring.desc_size,
2006 rq->data_ring.base, rq->data_ring.basePA);
2007 rq->data_ring.base = NULL;
2010 if (rq->comp_ring.base) {
2011 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2012 * sizeof(struct Vmxnet3_RxCompDesc),
2013 rq->comp_ring.base, rq->comp_ring.basePA);
2014 rq->comp_ring.base = NULL;
2017 kfree(rq->buf_info[0]);
2018 rq->buf_info[0] = NULL;
2019 rq->buf_info[1] = NULL;
2023 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
2027 for (i = 0; i < adapter->num_rx_queues; i++) {
2028 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2030 if (rq->data_ring.base) {
2031 dma_free_coherent(&adapter->pdev->dev,
2032 (rq->rx_ring[0].size *
2033 rq->data_ring.desc_size),
2035 rq->data_ring.basePA);
2036 rq->data_ring.base = NULL;
2037 rq->data_ring.desc_size = 0;
2043 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
2044 struct vmxnet3_adapter *adapter)
2048 /* initialize buf_info */
2049 for (i = 0; i < rq->rx_ring[0].size; i++) {
2051 /* 1st buf for a pkt is skbuff or xdp page */
2052 if (i % adapter->rx_buf_per_pkt == 0) {
2053 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
2054 VMXNET3_RX_BUF_XDP :
2056 rq->buf_info[0][i].len = adapter->skb_buf_size;
2057 } else { /* subsequent bufs for a pkt is frag */
2058 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
2059 rq->buf_info[0][i].len = PAGE_SIZE;
2062 for (i = 0; i < rq->rx_ring[1].size; i++) {
2063 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
2064 rq->buf_info[1][i].len = PAGE_SIZE;
2067 /* reset internal state and allocate buffers for both rings */
2068 for (i = 0; i < 2; i++) {
2069 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
2071 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
2072 sizeof(struct Vmxnet3_RxDesc));
2073 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
2074 rq->rx_ring[i].isOutOfOrder = 0;
2077 err = vmxnet3_create_pp(adapter, rq,
2078 rq->rx_ring[0].size + rq->rx_ring[1].size);
2082 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
2084 xdp_rxq_info_unreg(&rq->xdp_rxq);
2085 page_pool_destroy(rq->page_pool);
2086 rq->page_pool = NULL;
2088 /* at least has 1 rx buffer for the 1st ring */
2091 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2093 /* reset the comp ring */
2094 rq->comp_ring.next2proc = 0;
2095 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
2096 sizeof(struct Vmxnet3_RxCompDesc));
2097 rq->comp_ring.gen = VMXNET3_INIT_GEN;
2100 rq->rx_ctx.skb = NULL;
2102 /* stats are not reset */
2108 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
2112 for (i = 0; i < adapter->num_rx_queues; i++) {
2113 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
2114 if (unlikely(err)) {
2115 dev_err(&adapter->netdev->dev, "%s: failed to "
2116 "initialize rx queue%i\n",
2117 adapter->netdev->name, i);
2127 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2131 struct vmxnet3_rx_buf_info *bi;
2133 for (i = 0; i < 2; i++) {
2135 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2136 rq->rx_ring[i].base = dma_alloc_coherent(
2137 &adapter->pdev->dev, sz,
2138 &rq->rx_ring[i].basePA,
2140 if (!rq->rx_ring[i].base) {
2141 netdev_err(adapter->netdev,
2142 "failed to allocate rx ring %d\n", i);
2147 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2148 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
2149 rq->data_ring.base =
2150 dma_alloc_coherent(&adapter->pdev->dev, sz,
2151 &rq->data_ring.basePA,
2153 if (!rq->data_ring.base) {
2154 netdev_err(adapter->netdev,
2155 "rx data ring will be disabled\n");
2156 adapter->rxdataring_enabled = false;
2159 rq->data_ring.base = NULL;
2160 rq->data_ring.desc_size = 0;
2163 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2164 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2165 &rq->comp_ring.basePA,
2167 if (!rq->comp_ring.base) {
2168 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2172 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2173 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2174 dev_to_node(&adapter->pdev->dev));
2178 rq->buf_info[0] = bi;
2179 rq->buf_info[1] = bi + rq->rx_ring[0].size;
2184 vmxnet3_rq_destroy(rq, adapter);
2190 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2194 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2196 for (i = 0; i < adapter->num_rx_queues; i++) {
2197 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2198 if (unlikely(err)) {
2199 dev_err(&adapter->netdev->dev,
2200 "%s: failed to create rx queue%i\n",
2201 adapter->netdev->name, i);
2206 if (!adapter->rxdataring_enabled)
2207 vmxnet3_rq_destroy_all_rxdataring(adapter);
2211 vmxnet3_rq_destroy_all(adapter);
2216 /* Multiple queue aware polling function for tx and rx */
2219 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2221 int rcd_done = 0, i;
2222 if (unlikely(adapter->shared->ecr))
2223 vmxnet3_process_events(adapter);
2224 for (i = 0; i < adapter->num_tx_queues; i++)
2225 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2227 for (i = 0; i < adapter->num_rx_queues; i++)
2228 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2235 vmxnet3_poll(struct napi_struct *napi, int budget)
2237 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
2238 struct vmxnet3_rx_queue, napi);
2241 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2243 if (rxd_done < budget) {
2244 napi_complete_done(napi, rxd_done);
2245 vmxnet3_enable_all_intrs(rx_queue->adapter);
2251 * NAPI polling function for MSI-X mode with multiple Rx queues
2252 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
2256 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
2258 struct vmxnet3_rx_queue *rq = container_of(napi,
2259 struct vmxnet3_rx_queue, napi);
2260 struct vmxnet3_adapter *adapter = rq->adapter;
2263 /* When sharing interrupt with corresponding tx queue, process
2264 * tx completions in that queue as well
2266 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2267 struct vmxnet3_tx_queue *tq =
2268 &adapter->tx_queue[rq - adapter->rx_queue];
2269 vmxnet3_tq_tx_complete(tq, adapter);
2272 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2274 if (rxd_done < budget) {
2275 napi_complete_done(napi, rxd_done);
2276 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2282 #ifdef CONFIG_PCI_MSI
2285 * Handle completion interrupts on tx queues
2286 * Returns whether or not the intr is handled
2290 vmxnet3_msix_tx(int irq, void *data)
2292 struct vmxnet3_tx_queue *tq = data;
2293 struct vmxnet3_adapter *adapter = tq->adapter;
2295 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2296 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2298 /* Handle the case where only one irq is allocated for all tx queues */
2299 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2301 for (i = 0; i < adapter->num_tx_queues; i++) {
2302 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2303 vmxnet3_tq_tx_complete(txq, adapter);
2306 vmxnet3_tq_tx_complete(tq, adapter);
2308 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2315 * Handle completion interrupts on rx queues. Returns whether or not the
2316 * intr is handled
2320 vmxnet3_msix_rx(int irq, void *data)
2322 struct vmxnet3_rx_queue *rq = data;
2323 struct vmxnet3_adapter *adapter = rq->adapter;
2325 /* disable intr if needed */
2326 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2327 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2328 napi_schedule(&rq->napi);
2334 *----------------------------------------------------------------------------
2336 * vmxnet3_msix_event --
2338 * vmxnet3 msix event intr handler
2340 * Result:
2341 * whether or not the intr is handled
2343 *----------------------------------------------------------------------------
2347 vmxnet3_msix_event(int irq, void *data)
2349 struct net_device *dev = data;
2350 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2352 /* disable intr if needed */
2353 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2354 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2356 if (adapter->shared->ecr)
2357 vmxnet3_process_events(adapter);
2359 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2364 #endif /* CONFIG_PCI_MSI */
2367 /* Interrupt handler for vmxnet3 */
2369 vmxnet3_intr(int irq, void *dev_id)
2371 struct net_device *dev = dev_id;
2372 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2374 if (adapter->intr.type == VMXNET3_IT_INTX) {
2375 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2376 if (unlikely(icr == 0))
2382 /* disable intr if needed */
2383 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2384 vmxnet3_disable_all_intrs(adapter);
2386 napi_schedule(&adapter->rx_queue[0].napi);
2391 #ifdef CONFIG_NET_POLL_CONTROLLER
2393 /* netpoll callback. */
2395 vmxnet3_netpoll(struct net_device *netdev)
2397 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2399 switch (adapter->intr.type) {
2400 #ifdef CONFIG_PCI_MSI
2401 case VMXNET3_IT_MSIX: {
2403 for (i = 0; i < adapter->num_rx_queues; i++)
2404 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2408 case VMXNET3_IT_MSI:
2410 vmxnet3_intr(0, adapter->netdev);
2415 #endif /* CONFIG_NET_POLL_CONTROLLER */
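/* Request the interrupts selected earlier (MSI-X, MSI or INTx) and assign an
 * interrupt index to each tx/rx completion ring and to the event interrupt.
 */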
2418 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2420 struct vmxnet3_intr *intr = &adapter->intr;
2424 #ifdef CONFIG_PCI_MSI
2425 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2426 for (i = 0; i < adapter->num_tx_queues; i++) {
2427 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2428 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2429 adapter->netdev->name, vector);
2431 intr->msix_entries[vector].vector,
2433 adapter->tx_queue[i].name,
2434 &adapter->tx_queue[i]);
2436 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2437 adapter->netdev->name, vector);
2440 dev_err(&adapter->netdev->dev,
2441 "Failed to request irq for MSIX, %s, "
2443 adapter->tx_queue[i].name, err);
2447 /* Handle the case where only 1 MSIx was allocated for all tx queues */
2449 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2450 for (; i < adapter->num_tx_queues; i++)
2451 adapter->tx_queue[i].comp_ring.intr_idx
2456 adapter->tx_queue[i].comp_ring.intr_idx
2460 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2463 for (i = 0; i < adapter->num_rx_queues; i++) {
2464 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2465 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2466 adapter->netdev->name, vector);
2468 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2469 adapter->netdev->name, vector);
2470 err = request_irq(intr->msix_entries[vector].vector,
2472 adapter->rx_queue[i].name,
2473 &(adapter->rx_queue[i]));
2475 netdev_err(adapter->netdev,
2476 "Failed to request irq for MSIX, "
2478 adapter->rx_queue[i].name, err);
2482 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2485 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2486 adapter->netdev->name, vector);
2487 err = request_irq(intr->msix_entries[vector].vector,
2488 vmxnet3_msix_event, 0,
2489 intr->event_msi_vector_name, adapter->netdev);
2490 intr->event_intr_idx = vector;
2492 } else if (intr->type == VMXNET3_IT_MSI) {
2493 adapter->num_rx_queues = 1;
2494 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2495 adapter->netdev->name, adapter->netdev);
2498 adapter->num_rx_queues = 1;
2499 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2500 IRQF_SHARED, adapter->netdev->name,
2502 #ifdef CONFIG_PCI_MSI
2505 intr->num_intrs = vector + 1;
2507 netdev_err(adapter->netdev,
2508 "Failed to request irq (intr type:%d), error %d\n",
2511 /* Number of rx queues will not change after this */
2512 for (i = 0; i < adapter->num_rx_queues; i++) {
2513 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2515 rq->qid2 = i + adapter->num_rx_queues;
2516 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2519 /* init our intr settings */
2520 for (i = 0; i < intr->num_intrs; i++)
2521 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2522 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2523 adapter->intr.event_intr_idx = 0;
2524 for (i = 0; i < adapter->num_tx_queues; i++)
2525 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2526 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2529 netdev_info(adapter->netdev,
2530 "intr type %u, mode %u, %u vectors allocated\n",
2531 intr->type, intr->mask_mode, intr->num_intrs);
2539 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2541 struct vmxnet3_intr *intr = &adapter->intr;
2542 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2544 switch (intr->type) {
2545 #ifdef CONFIG_PCI_MSI
2546 case VMXNET3_IT_MSIX:
2550 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2551 for (i = 0; i < adapter->num_tx_queues; i++) {
2552 free_irq(intr->msix_entries[vector++].vector,
2553 &(adapter->tx_queue[i]));
2554 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2559 for (i = 0; i < adapter->num_rx_queues; i++) {
2560 free_irq(intr->msix_entries[vector++].vector,
2561 &(adapter->rx_queue[i]));
2564 free_irq(intr->msix_entries[vector].vector,
2566 BUG_ON(vector >= intr->num_intrs);
2570 case VMXNET3_IT_MSI:
2571 free_irq(adapter->pdev->irq, adapter->netdev);
2573 case VMXNET3_IT_INTX:
2574 free_irq(adapter->pdev->irq, adapter->netdev);
2583 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2585 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2588 /* allow untagged pkts */
2589 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2591 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2592 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
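/* vfTable is a bitmap with one bit per possible VLAN ID (VLAN_N_VID
 * entries); restoring it re-adds every VLAN the stack has configured,
 * plus entry 0 so untagged frames keep passing.
 */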
2597 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2599 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2601 if (!(netdev->flags & IFF_PROMISC)) {
2602 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2603 unsigned long flags;
2605 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2606 spin_lock_irqsave(&adapter->cmd_lock, flags);
2607 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2608 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2609 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2612 set_bit(vid, adapter->active_vlans);
2619 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2621 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2623 if (!(netdev->flags & IFF_PROMISC)) {
2624 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2625 unsigned long flags;
2627 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2628 spin_lock_irqsave(&adapter->cmd_lock, flags);
2629 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2630 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2631 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2634 clear_bit(vid, adapter->active_vlans);
2641 vmxnet3_copy_mc(struct net_device *netdev)
2644 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2646 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2648 /* We may be called with BH disabled */
2649 buf = kmalloc(sz, GFP_ATOMIC);
2651 struct netdev_hw_addr *ha;
2654 netdev_for_each_mc_addr(ha, netdev)
2655 memcpy(buf + i++ * ETH_ALEN, ha->addr,
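/* The result is a flat array of ETH_ALEN-byte addresses; the caller
 * DMA-maps it and passes the bus address to the device via mfTablePA.
 */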
2664 vmxnet3_set_mc(struct net_device *netdev)
2666 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2667 unsigned long flags;
2668 struct Vmxnet3_RxFilterConf *rxConf =
2669 &adapter->shared->devRead.rxFilterConf;
2670 u8 *new_table = NULL;
2671 dma_addr_t new_table_pa = 0;
2672 bool new_table_pa_valid = false;
2673 u32 new_mode = VMXNET3_RXM_UCAST;
2675 if (netdev->flags & IFF_PROMISC) {
2676 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2677 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2679 new_mode |= VMXNET3_RXM_PROMISC;
2681 vmxnet3_restore_vlan(adapter);
2684 if (netdev->flags & IFF_BROADCAST)
2685 new_mode |= VMXNET3_RXM_BCAST;
2687 if (netdev->flags & IFF_ALLMULTI)
2688 new_mode |= VMXNET3_RXM_ALL_MULTI;
2690 if (!netdev_mc_empty(netdev)) {
2691 new_table = vmxnet3_copy_mc(netdev);
2693 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2695 rxConf->mfTableLen = cpu_to_le16(sz);
2696 new_table_pa = dma_map_single(
2697 &adapter->pdev->dev,
2701 if (!dma_mapping_error(&adapter->pdev->dev,
2703 new_mode |= VMXNET3_RXM_MCAST;
2704 new_table_pa_valid = true;
2705 rxConf->mfTablePA = cpu_to_le64(
2709 if (!new_table_pa_valid) {
2711 "failed to copy mcast list, setting ALL_MULTI\n");
2712 new_mode |= VMXNET3_RXM_ALL_MULTI;
2716 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2717 rxConf->mfTableLen = 0;
2718 rxConf->mfTablePA = 0;
2721 spin_lock_irqsave(&adapter->cmd_lock, flags);
2722 if (new_mode != rxConf->rxMode) {
2723 rxConf->rxMode = cpu_to_le32(new_mode);
2724 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2725 VMXNET3_CMD_UPDATE_RX_MODE);
2726 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2727 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2730 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2731 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2732 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2734 if (new_table_pa_valid)
2735 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2736 rxConf->mfTableLen, DMA_TO_DEVICE);
2741 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2745 for (i = 0; i < adapter->num_rx_queues; i++)
2746 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2751 * Set up driver_shared based on settings in adapter.
2755 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2757 struct Vmxnet3_DriverShared *shared = adapter->shared;
2758 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2759 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2760 struct Vmxnet3_TxQueueConf *tqc;
2761 struct Vmxnet3_RxQueueConf *rqc;
2764 memset(shared, 0, sizeof(*shared));
2766 /* driver settings */
2767 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2768 devRead->misc.driverInfo.version = cpu_to_le32(
2769 VMXNET3_DRIVER_VERSION_NUM);
2770 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2771 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2772 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2773 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2774 *((u32 *)&devRead->misc.driverInfo.gos));
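/* The gos fields above are bit-fields sharing a single 32-bit word, so
 * the word is byte-swapped in place as a whole.
 */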
2775 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2776 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2778 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2779 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2781 /* set up feature flags */
2782 if (adapter->netdev->features & NETIF_F_RXCSUM)
2783 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2785 if (adapter->netdev->features & NETIF_F_LRO) {
2786 devRead->misc.uptFeatures |= UPT1_F_LRO;
2787 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2789 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2790 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2792 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2793 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2794 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2796 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2797 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2798 devRead->misc.queueDescLen = cpu_to_le32(
2799 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2800 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2802 /* tx queue settings */
2803 devRead->misc.numTxQueues = adapter->num_tx_queues;
2804 for (i = 0; i < adapter->num_tx_queues; i++) {
2805 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2806 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2807 tqc = &adapter->tqd_start[i].conf;
2808 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2809 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2810 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2811 tqc->ddPA = cpu_to_le64(~0ULL);
2812 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2813 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2814 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2815 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2816 tqc->ddLen = cpu_to_le32(0);
2817 tqc->intrIdx = tq->comp_ring.intr_idx;
2820 /* rx queue settings */
2821 devRead->misc.numRxQueues = adapter->num_rx_queues;
2822 for (i = 0; i < adapter->num_rx_queues; i++) {
2823 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2824 rqc = &adapter->rqd_start[i].conf;
2825 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2826 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2827 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2828 rqc->ddPA = cpu_to_le64(~0ULL);
2829 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2830 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2831 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2832 rqc->ddLen = cpu_to_le32(0);
2833 rqc->intrIdx = rq->comp_ring.intr_idx;
2834 if (VMXNET3_VERSION_GE_3(adapter)) {
2835 rqc->rxDataRingBasePA =
2836 cpu_to_le64(rq->data_ring.basePA);
2837 rqc->rxDataRingDescSize =
2838 cpu_to_le16(rq->data_ring.desc_size);
2843 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2846 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2848 devRead->misc.uptFeatures |= UPT1_F_RSS;
2849 devRead->misc.numRxQueues = adapter->num_rx_queues;
2850 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2851 UPT1_RSS_HASH_TYPE_IPV4 |
2852 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2853 UPT1_RSS_HASH_TYPE_IPV6;
2854 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2855 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2856 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2857 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2859 for (i = 0; i < rssConf->indTableSize; i++)
2860 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2861 i, adapter->num_rx_queues);
2863 devRead->rssConfDesc.confVer = 1;
2864 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2865 devRead->rssConfDesc.confPA =
2866 cpu_to_le64(adapter->rss_conf_pa);
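/* The device hashes flows with the Toeplitz function using the random
 * key filled in above, and the indirection table maps each hash value
 * onto one of the rx queues.
 */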
2869 #endif /* VMXNET3_RSS */
2872 if (!VMXNET3_VERSION_GE_6(adapter) ||
2873 !adapter->queuesExtEnabled) {
2874 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2876 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2877 for (i = 0; i < adapter->intr.num_intrs; i++)
2878 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2880 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2881 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2883 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2885 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2886 for (i = 0; i < adapter->intr.num_intrs; i++)
2887 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2889 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2890 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
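/* Interrupts remain flagged as disabled in the shared area until the
 * device has been activated and interrupts are explicitly re-enabled.
 */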
2893 /* rx filter settings */
2894 devRead->rxFilterConf.rxMode = 0;
2895 vmxnet3_restore_vlan(adapter);
2896 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2898 /* the rest are already zeroed */
2902 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
2904 struct Vmxnet3_DriverShared *shared = adapter->shared;
2905 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2906 unsigned long flags;
2908 if (!VMXNET3_VERSION_GE_7(adapter))
2911 cmdInfo->ringBufSize = adapter->ringBufSize;
2912 spin_lock_irqsave(&adapter->cmd_lock, flags);
2913 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2914 VMXNET3_CMD_SET_RING_BUFFER_SIZE);
2915 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2919 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2921 struct Vmxnet3_DriverShared *shared = adapter->shared;
2922 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2923 unsigned long flags;
2925 if (!VMXNET3_VERSION_GE_3(adapter))
2928 spin_lock_irqsave(&adapter->cmd_lock, flags);
2929 cmdInfo->varConf.confVer = 1;
2930 cmdInfo->varConf.confLen =
2931 cpu_to_le32(sizeof(*adapter->coal_conf));
2932 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2934 if (adapter->default_coal_mode) {
2935 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2936 VMXNET3_CMD_GET_COALESCE);
2938 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2939 VMXNET3_CMD_SET_COALESCE);
2942 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2946 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2948 struct Vmxnet3_DriverShared *shared = adapter->shared;
2949 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2950 unsigned long flags;
2952 if (!VMXNET3_VERSION_GE_4(adapter))
2955 spin_lock_irqsave(&adapter->cmd_lock, flags);
2957 if (adapter->default_rss_fields) {
2958 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2959 VMXNET3_CMD_GET_RSS_FIELDS);
2960 adapter->rss_fields =
2961 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2963 if (VMXNET3_VERSION_GE_7(adapter)) {
2964 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
2965 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
2966 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2967 VMXNET3_CAP_UDP_RSS)) {
2968 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
2970 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
2973 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
2974 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2975 VMXNET3_CAP_ESP_RSS_IPV4)) {
2976 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
2978 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
2981 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
2982 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2983 VMXNET3_CAP_ESP_RSS_IPV6)) {
2984 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
2986 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
2989 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
2990 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
2991 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2993 cmdInfo->setRssFields = adapter->rss_fields;
2994 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2995 VMXNET3_CMD_SET_RSS_FIELDS);
2996 /* Not all requested RSS may get applied, so get and
2997 * cache what was actually applied.
2999 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3000 VMXNET3_CMD_GET_RSS_FIELDS);
3001 adapter->rss_fields =
3002 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3005 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3009 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3013 unsigned long flags;
3015 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
3016 " ring sizes %u %u %u\n", adapter->netdev->name,
3017 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
3018 adapter->tx_queue[0].tx_ring.size,
3019 adapter->rx_queue[0].rx_ring[0].size,
3020 adapter->rx_queue[0].rx_ring[1].size);
3022 vmxnet3_tq_init_all(adapter);
3023 err = vmxnet3_rq_init_all(adapter);
3025 netdev_err(adapter->netdev,
3026 "Failed to init rx queue error %d\n", err);
3030 err = vmxnet3_request_irqs(adapter);
3032 netdev_err(adapter->netdev,
3033 "Failed to setup irq for error %d\n", err);
3037 vmxnet3_setup_driver_shared(adapter);
3039 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3040 adapter->shared_pa));
3041 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3042 adapter->shared_pa));
3043 spin_lock_irqsave(&adapter->cmd_lock, flags);
3044 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3045 VMXNET3_CMD_ACTIVATE_DEV);
3046 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3047 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3050 netdev_err(adapter->netdev,
3051 "Failed to activate dev: error %u\n", ret);
3056 vmxnet3_init_bufsize(adapter);
3057 vmxnet3_init_coalesce(adapter);
3058 vmxnet3_init_rssfields(adapter);
3060 for (i = 0; i < adapter->num_rx_queues; i++) {
3061 VMXNET3_WRITE_BAR0_REG(adapter,
3062 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
3063 adapter->rx_queue[i].rx_ring[0].next2fill);
3064 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
3065 (i * VMXNET3_REG_ALIGN)),
3066 adapter->rx_queue[i].rx_ring[1].next2fill);
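/* Writing next2fill to the rx producer registers tells the device how
 * many buffers were pre-filled in each ring and are available for
 * incoming packets.
 */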
3069 /* Apply the rx filter settings last. */
3070 vmxnet3_set_mc(adapter->netdev);
3073 * Check link state when first activating device. It will start the
3074 * tx queue if the link is up.
3076 vmxnet3_check_link(adapter, true);
3077 netif_tx_wake_all_queues(adapter->netdev);
3078 for (i = 0; i < adapter->num_rx_queues; i++)
3079 napi_enable(&adapter->rx_queue[i].napi);
3080 vmxnet3_enable_all_intrs(adapter);
3081 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3085 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3086 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3087 vmxnet3_free_irqs(adapter);
3090 /* free up buffers we allocated */
3091 vmxnet3_rq_cleanup_all(adapter);
3097 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3099 unsigned long flags;
3100 spin_lock_irqsave(&adapter->cmd_lock, flags);
3101 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
3102 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3107 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3110 unsigned long flags;
3111 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3115 spin_lock_irqsave(&adapter->cmd_lock, flags);
3116 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3117 VMXNET3_CMD_QUIESCE_DEV);
3118 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3119 vmxnet3_disable_all_intrs(adapter);
3121 for (i = 0; i < adapter->num_rx_queues; i++)
3122 napi_disable(&adapter->rx_queue[i].napi);
3123 netif_tx_disable(adapter->netdev);
3124 adapter->link_speed = 0;
3125 netif_carrier_off(adapter->netdev);
3127 vmxnet3_tq_cleanup_all(adapter);
3128 vmxnet3_rq_cleanup_all(adapter);
3129 vmxnet3_free_irqs(adapter);
3135 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3140 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
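/* MACL carries the first four bytes of the address; MACH, written
 * below, carries the remaining two.
 */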
3142 tmp = (mac[5] << 8) | mac[4];
3143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
3148 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
3150 struct sockaddr *addr = p;
3151 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3153 dev_addr_set(netdev, addr->sa_data);
3154 vmxnet3_write_mac_addr(adapter, addr->sa_data);
3160 /* ==================== initialization and cleanup routines ============ */
3163 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3166 unsigned long mmio_start, mmio_len;
3167 struct pci_dev *pdev = adapter->pdev;
3169 err = pci_enable_device(pdev);
3171 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3175 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3176 vmxnet3_driver_name);
3179 "Failed to request region for adapter: error %d\n", err);
3180 goto err_enable_device;
3183 pci_set_master(pdev);
3185 mmio_start = pci_resource_start(pdev, 0);
3186 mmio_len = pci_resource_len(pdev, 0);
3187 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3188 if (!adapter->hw_addr0) {
3189 dev_err(&pdev->dev, "Failed to map bar0\n");
3194 mmio_start = pci_resource_start(pdev, 1);
3195 mmio_len = pci_resource_len(pdev, 1);
3196 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3197 if (!adapter->hw_addr1) {
3198 dev_err(&pdev->dev, "Failed to map bar1\n");
3205 iounmap(adapter->hw_addr0);
3207 pci_release_selected_regions(pdev, (1 << 2) - 1);
3209 pci_disable_device(pdev);
3215 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3217 BUG_ON(!adapter->pdev);
3219 iounmap(adapter->hw_addr0);
3220 iounmap(adapter->hw_addr1);
3221 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3222 pci_disable_device(adapter->pdev);
3227 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3229 size_t sz, i, ring0_size, ring1_size, comp_size;
3230 /* With version 7, ring1 will have only T0 buffers */
3231 if (!VMXNET3_VERSION_GE_7(adapter)) {
3232 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3233 VMXNET3_MAX_ETH_HDR_SIZE) {
3234 adapter->skb_buf_size = adapter->netdev->mtu +
3235 VMXNET3_MAX_ETH_HDR_SIZE;
3236 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3237 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3239 adapter->rx_buf_per_pkt = 1;
3241 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3242 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3243 VMXNET3_MAX_ETH_HDR_SIZE;
3244 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3247 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3248 VMXNET3_MAX_SKB_BUF_SIZE);
3249 adapter->rx_buf_per_pkt = 1;
3250 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3251 adapter->ringBufSize.ring1BufSizeType1 = 0;
3252 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3256 * for simplicity, force the ring0 size to be a multiple of
3257 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3259 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3260 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3261 ring0_size = (ring0_size + sz - 1) / sz * sz;
3262 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3264 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3265 ring1_size = (ring1_size + sz - 1) / sz * sz;
3266 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3268 /* For v7 and later, keep ring sizes a power of 2 for UPT */
3269 if (VMXNET3_VERSION_GE_7(adapter)) {
3270 ring0_size = rounddown_pow_of_two(ring0_size);
3271 ring1_size = rounddown_pow_of_two(ring1_size);
3273 comp_size = ring0_size + ring1_size;
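/* The completion ring is sized to hold one descriptor for every entry
 * in the two fill rings, so completions can never overflow it.
 */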
3275 for (i = 0; i < adapter->num_rx_queues; i++) {
3276 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3278 rq->rx_ring[0].size = ring0_size;
3279 rq->rx_ring[1].size = ring1_size;
3280 rq->comp_ring.size = comp_size;
3286 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3287 u32 rx_ring_size, u32 rx_ring2_size,
3288 u16 txdata_desc_size, u16 rxdata_desc_size)
3292 for (i = 0; i < adapter->num_tx_queues; i++) {
3293 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3294 tq->tx_ring.size = tx_ring_size;
3295 tq->data_ring.size = tx_ring_size;
3296 tq->comp_ring.size = tx_ring_size;
3297 tq->txdata_desc_size = txdata_desc_size;
3298 tq->shared = &adapter->tqd_start[i].ctrl;
3300 tq->adapter = adapter;
3302 err = vmxnet3_tq_create(tq, adapter);
3304 * Too late to change num_tx_queues. We cannot make do with
3305 * fewer queues than what we asked for
3311 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3312 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3313 vmxnet3_adjust_rx_ring_size(adapter);
3315 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3316 for (i = 0; i < adapter->num_rx_queues; i++) {
3317 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3318 /* qid and qid2 for rx queues will be assigned later when num
3319 * of rx queues is finalized after allocating intrs */
3320 rq->shared = &adapter->rqd_start[i].ctrl;
3321 rq->adapter = adapter;
3322 rq->data_ring.desc_size = rxdata_desc_size;
3323 err = vmxnet3_rq_create(rq, adapter);
3326 netdev_err(adapter->netdev,
3327 "Could not allocate any rx queues. "
3331 netdev_info(adapter->netdev,
3332 "Number of rx queues changed "
3334 adapter->num_rx_queues = i;
3341 if (!adapter->rxdataring_enabled)
3342 vmxnet3_rq_destroy_all_rxdataring(adapter);
3346 vmxnet3_tq_destroy_all(adapter);
3351 vmxnet3_open(struct net_device *netdev)
3353 struct vmxnet3_adapter *adapter;
3356 adapter = netdev_priv(netdev);
3358 for (i = 0; i < adapter->num_tx_queues; i++)
3359 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3361 if (VMXNET3_VERSION_GE_3(adapter)) {
3362 unsigned long flags;
3363 u16 txdata_desc_size;
3365 spin_lock_irqsave(&adapter->cmd_lock, flags);
3366 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3367 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3368 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3370 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3372 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3373 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3374 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3375 adapter->txdata_desc_size =
3376 sizeof(struct Vmxnet3_TxDataDesc);
3378 adapter->txdata_desc_size = txdata_desc_size;
3381 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3384 err = vmxnet3_create_queues(adapter,
3385 adapter->tx_ring_size,
3386 adapter->rx_ring_size,
3387 adapter->rx_ring2_size,
3388 adapter->txdata_desc_size,
3389 adapter->rxdata_desc_size);
3393 err = vmxnet3_activate_dev(adapter);
3400 vmxnet3_rq_destroy_all(adapter);
3401 vmxnet3_tq_destroy_all(adapter);
3408 vmxnet3_close(struct net_device *netdev)
3410 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3413 * Reset_work may be in the middle of resetting the device, wait for its completion.
3416 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3417 usleep_range(1000, 2000);
3419 vmxnet3_quiesce_dev(adapter);
3421 vmxnet3_rq_destroy_all(adapter);
3422 vmxnet3_tq_destroy_all(adapter);
3424 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3432 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3437 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3438 * vmxnet3_close() will deadlock.
3440 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3442 /* we need to enable NAPI, otherwise dev_close will deadlock */
3443 for (i = 0; i < adapter->num_rx_queues; i++)
3444 napi_enable(&adapter->rx_queue[i].napi);
3446 * Need to clear the quiesce bit to ensure that vmxnet3_close
3447 * can quiesce the device properly
3449 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3450 dev_close(adapter->netdev);
3455 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3457 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3460 WRITE_ONCE(netdev->mtu, new_mtu);
3463 * Reset_work may be in the middle of resetting the device, wait for its completion.
3466 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3467 usleep_range(1000, 2000);
3469 if (netif_running(netdev)) {
3470 vmxnet3_quiesce_dev(adapter);
3471 vmxnet3_reset_dev(adapter);
3473 /* we need to re-create the rx queue based on the new mtu */
3474 vmxnet3_rq_destroy_all(adapter);
3475 vmxnet3_adjust_rx_ring_size(adapter);
3476 err = vmxnet3_rq_create_all(adapter);
3479 "failed to re-create rx queues, "
3480 " error %d. Closing it.\n", err);
3484 err = vmxnet3_activate_dev(adapter);
3487 "failed to re-activate, error %d. "
3488 "Closing it\n", err);
3494 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3496 vmxnet3_force_close(adapter);
3503 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3505 struct net_device *netdev = adapter->netdev;
3507 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3508 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3509 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3510 NETIF_F_LRO | NETIF_F_HIGHDMA;
3512 if (VMXNET3_VERSION_GE_4(adapter)) {
3513 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3514 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3516 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3517 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3518 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3519 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3520 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3523 if (VMXNET3_VERSION_GE_7(adapter)) {
3524 unsigned long flags;
3526 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3527 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3528 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3530 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3531 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3532 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3534 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3535 VMXNET3_CAP_GENEVE_TSO)) {
3536 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3538 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3539 VMXNET3_CAP_VXLAN_TSO)) {
3540 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3542 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3543 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3544 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3546 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3547 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3548 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3551 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3552 spin_lock_irqsave(&adapter->cmd_lock, flags);
3553 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3554 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3555 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3557 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3558 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3559 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3560 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3561 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3562 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3564 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3565 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3566 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3567 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3571 netdev->vlan_features = netdev->hw_features &
3572 ~(NETIF_F_HW_VLAN_CTAG_TX |
3573 NETIF_F_HW_VLAN_CTAG_RX);
3574 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3579 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3583 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3586 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3587 mac[4] = tmp & 0xff;
3588 mac[5] = (tmp >> 8) & 0xff;
3591 #ifdef CONFIG_PCI_MSI
3594 * Enable MSIx vectors.
3596 * Returns VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of
3598 * vectors required were enabled, otherwise the number of vectors that
3599 * were enabled (this number is greater than VMXNET3_LINUX_MIN_MSIX_VECT).
3603 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3605 int ret = pci_enable_msix_range(adapter->pdev,
3606 adapter->intr.msix_entries, nvec, nvec);
3608 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3609 dev_err(&adapter->netdev->dev,
3610 "Failed to enable %d MSI-X, trying %d\n",
3611 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3613 ret = pci_enable_msix_range(adapter->pdev,
3614 adapter->intr.msix_entries,
3615 VMXNET3_LINUX_MIN_MSIX_VECT,
3616 VMXNET3_LINUX_MIN_MSIX_VECT);
3620 dev_err(&adapter->netdev->dev,
3621 "Failed to enable MSI-X, error: %d\n", ret);
3628 #endif /* CONFIG_PCI_MSI */
3631 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3634 unsigned long flags;
3637 spin_lock_irqsave(&adapter->cmd_lock, flags);
3638 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3639 VMXNET3_CMD_GET_CONF_INTR);
3640 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3641 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3642 adapter->intr.type = cfg & 0x3;
3643 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3645 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3646 adapter->intr.type = VMXNET3_IT_MSIX;
3649 #ifdef CONFIG_PCI_MSI
3650 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3651 int i, nvec, nvec_allocated;
3653 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3654 1 : adapter->num_tx_queues;
3655 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3656 0 : adapter->num_rx_queues;
3657 nvec += 1; /* for link event */
3658 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3659 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
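/* Vector budget: one per tx queue (or a single shared one), one per rx
 * queue (or none when tx and rx buddy-share vectors), plus one for
 * events, but never fewer than VMXNET3_LINUX_MIN_MSIX_VECT.
 */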
3661 for (i = 0; i < nvec; i++)
3662 adapter->intr.msix_entries[i].entry = i;
3664 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3665 if (nvec_allocated < 0)
3668 /* If we cannot allocate one MSIx vector per queue
3669 * then limit the number of rx queues to 1
3671 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3672 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3673 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3674 || adapter->num_rx_queues != 1) {
3675 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3676 netdev_err(adapter->netdev,
3677 "Number of rx queues : 1\n");
3678 adapter->num_rx_queues = 1;
3682 adapter->intr.num_intrs = nvec_allocated;
3686 /* If we cannot allocate MSIx vectors use only one rx queue */
3687 dev_info(&adapter->pdev->dev,
3688 "Failed to enable MSI-X, error %d. "
3689 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3691 adapter->intr.type = VMXNET3_IT_MSI;
3694 if (adapter->intr.type == VMXNET3_IT_MSI) {
3695 if (!pci_enable_msi(adapter->pdev)) {
3696 adapter->num_rx_queues = 1;
3697 adapter->intr.num_intrs = 1;
3701 #endif /* CONFIG_PCI_MSI */
3703 adapter->num_rx_queues = 1;
3704 dev_info(&adapter->netdev->dev,
3705 "Using INTx interrupt, #Rx queues: 1.\n");
3706 adapter->intr.type = VMXNET3_IT_INTX;
3708 /* INT-X related setting */
3709 adapter->intr.num_intrs = 1;
3714 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3716 if (adapter->intr.type == VMXNET3_IT_MSIX)
3717 pci_disable_msix(adapter->pdev);
3718 else if (adapter->intr.type == VMXNET3_IT_MSI)
3719 pci_disable_msi(adapter->pdev);
3721 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3726 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3728 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3729 adapter->tx_timeout_count++;
3731 netdev_err(adapter->netdev, "tx hang\n");
3732 schedule_work(&adapter->work);
3737 vmxnet3_reset_work(struct work_struct *data)
3739 struct vmxnet3_adapter *adapter;
3741 adapter = container_of(data, struct vmxnet3_adapter, work);
3743 /* if another thread is resetting the device, no need to proceed */
3744 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3747 /* if the device is closed, we must leave it alone */
3749 if (netif_running(adapter->netdev)) {
3750 netdev_notice(adapter->netdev, "resetting\n");
3751 vmxnet3_quiesce_dev(adapter);
3752 vmxnet3_reset_dev(adapter);
3753 vmxnet3_activate_dev(adapter);
3755 netdev_info(adapter->netdev, "already closed\n");
3759 netif_wake_queue(adapter->netdev);
3760 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3765 vmxnet3_probe_device(struct pci_dev *pdev,
3766 const struct pci_device_id *id)
3768 static const struct net_device_ops vmxnet3_netdev_ops = {
3769 .ndo_open = vmxnet3_open,
3770 .ndo_stop = vmxnet3_close,
3771 .ndo_start_xmit = vmxnet3_xmit_frame,
3772 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3773 .ndo_change_mtu = vmxnet3_change_mtu,
3774 .ndo_fix_features = vmxnet3_fix_features,
3775 .ndo_set_features = vmxnet3_set_features,
3776 .ndo_features_check = vmxnet3_features_check,
3777 .ndo_get_stats64 = vmxnet3_get_stats64,
3778 .ndo_tx_timeout = vmxnet3_tx_timeout,
3779 .ndo_set_rx_mode = vmxnet3_set_mc,
3780 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3781 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3782 #ifdef CONFIG_NET_POLL_CONTROLLER
3783 .ndo_poll_controller = vmxnet3_netpoll,
3785 .ndo_bpf = vmxnet3_xdp,
3786 .ndo_xdp_xmit = vmxnet3_xdp_xmit,
3790 struct net_device *netdev;
3791 struct vmxnet3_adapter *adapter;
3797 unsigned long flags;
3799 if (!pci_msi_enabled())
3804 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3805 (int)num_online_cpus());
3811 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3812 (int)num_online_cpus());
3816 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3817 max(num_tx_queues, num_rx_queues));
3821 pci_set_drvdata(pdev, netdev);
3822 adapter = netdev_priv(netdev);
3823 adapter->netdev = netdev;
3824 adapter->pdev = pdev;
3826 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3827 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3828 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3830 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3832 dev_err(&pdev->dev, "dma_set_mask failed\n");
3836 spin_lock_init(&adapter->cmd_lock);
3837 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3838 sizeof(struct vmxnet3_adapter),
3840 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3841 dev_err(&pdev->dev, "Failed to map dma\n");
3845 adapter->shared = dma_alloc_coherent(
3846 &adapter->pdev->dev,
3847 sizeof(struct Vmxnet3_DriverShared),
3848 &adapter->shared_pa, GFP_KERNEL);
3849 if (!adapter->shared) {
3850 dev_err(&pdev->dev, "Failed to allocate memory\n");
3852 goto err_alloc_shared;
3855 err = vmxnet3_alloc_pci_resources(adapter);
3859 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3860 if (ver & (1 << VMXNET3_REV_7)) {
3861 VMXNET3_WRITE_BAR1_REG(adapter,
3863 1 << VMXNET3_REV_7);
3864 adapter->version = VMXNET3_REV_7 + 1;
3865 } else if (ver & (1 << VMXNET3_REV_6)) {
3866 VMXNET3_WRITE_BAR1_REG(adapter,
3868 1 << VMXNET3_REV_6);
3869 adapter->version = VMXNET3_REV_6 + 1;
3870 } else if (ver & (1 << VMXNET3_REV_5)) {
3871 VMXNET3_WRITE_BAR1_REG(adapter,
3873 1 << VMXNET3_REV_5);
3874 adapter->version = VMXNET3_REV_5 + 1;
3875 } else if (ver & (1 << VMXNET3_REV_4)) {
3876 VMXNET3_WRITE_BAR1_REG(adapter,
3878 1 << VMXNET3_REV_4);
3879 adapter->version = VMXNET3_REV_4 + 1;
3880 } else if (ver & (1 << VMXNET3_REV_3)) {
3881 VMXNET3_WRITE_BAR1_REG(adapter,
3883 1 << VMXNET3_REV_3);
3884 adapter->version = VMXNET3_REV_3 + 1;
3885 } else if (ver & (1 << VMXNET3_REV_2)) {
3886 VMXNET3_WRITE_BAR1_REG(adapter,
3888 1 << VMXNET3_REV_2);
3889 adapter->version = VMXNET3_REV_2 + 1;
3890 } else if (ver & (1 << VMXNET3_REV_1)) {
3891 VMXNET3_WRITE_BAR1_REG(adapter,
3893 1 << VMXNET3_REV_1);
3894 adapter->version = VMXNET3_REV_1 + 1;
3897 "Incompatible h/w version (0x%x) for adapter\n", ver);
3901 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
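/* VMXNET3_REG_VRRS advertises every hardware revision the device
 * supports; the driver picks the highest revision it also understands
 * and acknowledges it by writing that single bit back.
 */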
3903 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3905 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3908 "Incompatible upt version (0x%x) for adapter\n", ver);
3913 if (VMXNET3_VERSION_GE_7(adapter)) {
3914 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
3915 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
3916 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3917 adapter->dev_caps[0] = adapter->devcap_supported[0] &
3918 (1UL << VMXNET3_CAP_LARGE_BAR);
3920 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
3921 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
3922 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
3923 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
3924 (1UL << VMXNET3_CAP_OOORX_COMP);
3926 if (adapter->dev_caps[0])
3927 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3929 spin_lock_irqsave(&adapter->cmd_lock, flags);
3930 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3931 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3932 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3935 if (VMXNET3_VERSION_GE_7(adapter) &&
3936 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3937 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
3938 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
3939 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
3941 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
3942 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
3943 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
3946 if (VMXNET3_VERSION_GE_6(adapter)) {
3947 spin_lock_irqsave(&adapter->cmd_lock, flags);
3948 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3949 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
3950 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3951 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3953 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3954 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
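/* GET_MAX_QUEUES_CONF packs the device limits into one word: the low
 * byte holds the tx queue limit and the next byte the rx queue limit.
 */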
3956 adapter->num_rx_queues = min(num_rx_queues,
3957 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3958 adapter->num_tx_queues = min(num_tx_queues,
3959 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3961 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3962 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3963 adapter->queuesExtEnabled = true;
3965 adapter->queuesExtEnabled = false;
3968 adapter->queuesExtEnabled = false;
3969 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3970 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3971 adapter->num_rx_queues = min(num_rx_queues,
3972 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
3973 adapter->num_tx_queues = min(num_tx_queues,
3974 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
3976 dev_info(&pdev->dev,
3977 "# of Tx queues : %d, # of Rx queues : %d\n",
3978 adapter->num_tx_queues, adapter->num_rx_queues);
3980 adapter->rx_buf_per_pkt = 1;
3982 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3983 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3984 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3985 &adapter->queue_desc_pa,
3988 if (!adapter->tqd_start) {
3989 dev_err(&pdev->dev, "Failed to allocate memory\n");
3993 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3994 adapter->num_tx_queues);
3996 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3997 sizeof(struct Vmxnet3_PMConf),
3998 &adapter->pm_conf_pa,
4000 if (adapter->pm_conf == NULL) {
4007 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
4008 sizeof(struct UPT1_RSSConf),
4009 &adapter->rss_conf_pa,
4011 if (adapter->rss_conf == NULL) {
4015 #endif /* VMXNET3_RSS */
4017 if (VMXNET3_VERSION_GE_3(adapter)) {
4018 adapter->coal_conf =
4019 dma_alloc_coherent(&adapter->pdev->dev,
4020 sizeof(struct Vmxnet3_CoalesceScheme)
4022 &adapter->coal_conf_pa,
4024 if (!adapter->coal_conf) {
4028 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
4029 adapter->default_coal_mode = true;
4032 if (VMXNET3_VERSION_GE_4(adapter)) {
4033 adapter->default_rss_fields = true;
4034 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4037 SET_NETDEV_DEV(netdev, &pdev->dev);
4038 vmxnet3_declare_features(adapter);
4039 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4040 NETDEV_XDP_ACT_NDO_XMIT;
4042 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
4043 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
4045 if (adapter->num_tx_queues == adapter->num_rx_queues)
4046 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
4048 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
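/* BUDDYSHARE pairs each tx queue with the rx queue of the same index
 * on one MSI-X vector, which is only possible when the queue counts
 * match; otherwise tx and rx queues get separate vectors.
 */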
4050 vmxnet3_alloc_intr_resources(adapter);
4053 if (adapter->num_rx_queues > 1 &&
4054 adapter->intr.type == VMXNET3_IT_MSIX) {
4055 adapter->rss = true;
4056 netdev->hw_features |= NETIF_F_RXHASH;
4057 netdev->features |= NETIF_F_RXHASH;
4058 dev_dbg(&pdev->dev, "RSS is enabled.\n");
4060 adapter->rss = false;
4064 vmxnet3_read_mac_addr(adapter, mac);
4065 dev_addr_set(netdev, mac);
4067 netdev->netdev_ops = &vmxnet3_netdev_ops;
4068 vmxnet3_set_ethtool_ops(netdev);
4069 netdev->watchdog_timeo = 5 * HZ;
4071 /* MTU range: 60 - 9190 */
4072 netdev->min_mtu = VMXNET3_MIN_MTU;
4073 if (VMXNET3_VERSION_GE_6(adapter))
4074 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
4076 netdev->max_mtu = VMXNET3_MAX_MTU;
4078 INIT_WORK(&adapter->work, vmxnet3_reset_work);
4079 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4081 if (adapter->intr.type == VMXNET3_IT_MSIX) {
4083 for (i = 0; i < adapter->num_rx_queues; i++) {
4084 netif_napi_add(adapter->netdev,
4085 &adapter->rx_queue[i].napi,
4086 vmxnet3_poll_rx_only);
4089 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4093 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4094 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
4096 netif_carrier_off(netdev);
4097 err = register_netdev(netdev);
4100 dev_err(&pdev->dev, "Failed to register adapter\n");
4104 vmxnet3_check_link(adapter, false);
4108 if (VMXNET3_VERSION_GE_3(adapter)) {
4109 dma_free_coherent(&adapter->pdev->dev,
4110 sizeof(struct Vmxnet3_CoalesceScheme),
4111 adapter->coal_conf, adapter->coal_conf_pa);
4113 vmxnet3_free_intr_resources(adapter);
4116 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4117 adapter->rss_conf, adapter->rss_conf_pa);
4120 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4121 adapter->pm_conf, adapter->pm_conf_pa);
4123 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4124 adapter->queue_desc_pa);
4126 vmxnet3_free_pci_resources(adapter);
4128 dma_free_coherent(&adapter->pdev->dev,
4129 sizeof(struct Vmxnet3_DriverShared),
4130 adapter->shared, adapter->shared_pa);
4132 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4133 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4135 free_netdev(netdev);
4141 vmxnet3_remove_device(struct pci_dev *pdev)
4143 struct net_device *netdev = pci_get_drvdata(pdev);
4144 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4146 int num_rx_queues, rx_queues;
4147 unsigned long flags;
4151 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
4152 (int)num_online_cpus());
4156 if (!VMXNET3_VERSION_GE_6(adapter)) {
4157 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4159 if (VMXNET3_VERSION_GE_6(adapter)) {
4160 spin_lock_irqsave(&adapter->cmd_lock, flags);
4161 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4162 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4163 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4164 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4166 rx_queues = (rx_queues >> 8) & 0xff;
4168 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4169 num_rx_queues = min(num_rx_queues, rx_queues);
4171 num_rx_queues = min(num_rx_queues,
4172 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4175 cancel_work_sync(&adapter->work);
4177 unregister_netdev(netdev);
4179 vmxnet3_free_intr_resources(adapter);
4180 vmxnet3_free_pci_resources(adapter);
4181 if (VMXNET3_VERSION_GE_3(adapter)) {
4182 dma_free_coherent(&adapter->pdev->dev,
4183 sizeof(struct Vmxnet3_CoalesceScheme),
4184 adapter->coal_conf, adapter->coal_conf_pa);
4187 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4188 adapter->rss_conf, adapter->rss_conf_pa);
4190 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4191 adapter->pm_conf, adapter->pm_conf_pa);
4193 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4194 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4195 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4196 adapter->queue_desc_pa);
4197 dma_free_coherent(&adapter->pdev->dev,
4198 sizeof(struct Vmxnet3_DriverShared),
4199 adapter->shared, adapter->shared_pa);
4200 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4201 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4202 free_netdev(netdev);
4205 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4207 struct net_device *netdev = pci_get_drvdata(pdev);
4208 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4209 unsigned long flags;
4211 /* Reset_work may be in the middle of resetting the device, wait for its completion.
4214 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4215 usleep_range(1000, 2000);
4217 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4219 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4222 spin_lock_irqsave(&adapter->cmd_lock, flags);
4223 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4224 VMXNET3_CMD_QUIESCE_DEV);
4225 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4226 vmxnet3_disable_all_intrs(adapter);
4228 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4235 vmxnet3_suspend(struct device *device)
4237 struct pci_dev *pdev = to_pci_dev(device);
4238 struct net_device *netdev = pci_get_drvdata(pdev);
4239 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4240 struct Vmxnet3_PMConf *pmConf;
4241 struct ethhdr *ehdr;
4242 struct arphdr *ahdr;
4244 struct in_device *in_dev;
4245 struct in_ifaddr *ifa;
4246 unsigned long flags;
4249 if (!netif_running(netdev))
4252 for (i = 0; i < adapter->num_rx_queues; i++)
4253 napi_disable(&adapter->rx_queue[i].napi);
4255 vmxnet3_disable_all_intrs(adapter);
4256 vmxnet3_free_irqs(adapter);
4257 vmxnet3_free_intr_resources(adapter);
4259 netif_device_detach(netdev);
4261 /* Create wake-up filters. */
4262 pmConf = adapter->pm_conf;
4263 memset(pmConf, 0, sizeof(*pmConf));
4265 if (adapter->wol & WAKE_UCAST) {
4266 pmConf->filters[i].patternSize = ETH_ALEN;
4267 pmConf->filters[i].maskSize = 1;
4268 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4269 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4271 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4275 if (adapter->wol & WAKE_ARP) {
4278 in_dev = __in_dev_get_rcu(netdev);
4284 ifa = rcu_dereference(in_dev->ifa_list);
4290 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4291 sizeof(struct arphdr) + /* ARP header */
4292 2 * ETH_ALEN + /* 2 Ethernet addresses*/
4293 2 * sizeof(u32); /*2 IPv4 addresses */
4294 pmConf->filters[i].maskSize =
4295 (pmConf->filters[i].patternSize - 1) / 8 + 1;
4297 /* ETH_P_ARP in Ethernet header. */
4298 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4299 ehdr->h_proto = htons(ETH_P_ARP);
4301 /* ARPOP_REQUEST in ARP header. */
4302 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4303 ahdr->ar_op = htons(ARPOP_REQUEST);
4304 arpreq = (u8 *)(ahdr + 1);
4306 /* The Unicast IPv4 address in 'tip' field. */
4307 arpreq += 2 * ETH_ALEN + sizeof(u32);
4308 *(__be32 *)arpreq = ifa->ifa_address;
4312 /* The mask for the relevant bits. */
4313 pmConf->filters[i].mask[0] = 0x00;
4314 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4315 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4316 pmConf->filters[i].mask[3] = 0x00;
4317 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4318 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
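/* Each mask byte is a bitmap over eight pattern bytes; the bits set
 * above select only the EtherType, ARP opcode and target IP fields for
 * comparison, so any ARP request for our address wakes the host.
 */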
4320 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4325 if (adapter->wol & WAKE_MAGIC)
4326 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4328 pmConf->numFilters = i;
4330 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4331 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4333 adapter->shared->devRead.pmConfDesc.confPA =
4334 cpu_to_le64(adapter->pm_conf_pa);
4336 spin_lock_irqsave(&adapter->cmd_lock, flags);
4337 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4338 VMXNET3_CMD_UPDATE_PMCFG);
4339 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4341 pci_save_state(pdev);
4342 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4344 pci_disable_device(pdev);
4345 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4352 vmxnet3_resume(struct device *device)
4355 unsigned long flags;
4356 struct pci_dev *pdev = to_pci_dev(device);
4357 struct net_device *netdev = pci_get_drvdata(pdev);
4358 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4360 if (!netif_running(netdev))
4363 pci_set_power_state(pdev, PCI_D0);
4364 pci_restore_state(pdev);
4365 err = pci_enable_device_mem(pdev);
4369 pci_enable_wake(pdev, PCI_D0, 0);
4371 vmxnet3_alloc_intr_resources(adapter);
4373 /* During hibernate and suspend, device has to be reinitialized as the
4374 * device state need not be preserved.
4377 /* Need not check adapter state as other reset tasks cannot run during
4380 spin_lock_irqsave(&adapter->cmd_lock, flags);
4381 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4382 VMXNET3_CMD_QUIESCE_DEV);
4383 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4384 vmxnet3_tq_cleanup_all(adapter);
4385 vmxnet3_rq_cleanup_all(adapter);
4387 vmxnet3_reset_dev(adapter);
4388 err = vmxnet3_activate_dev(adapter);
4391 "failed to re-activate on resume, error: %d", err);
4392 vmxnet3_force_close(adapter);
4395 netif_device_attach(netdev);
4400 static const struct dev_pm_ops vmxnet3_pm_ops = {
4401 .suspend = vmxnet3_suspend,
4402 .resume = vmxnet3_resume,
4403 .freeze = vmxnet3_suspend,
4404 .restore = vmxnet3_resume,
4408 static struct pci_driver vmxnet3_driver = {
4409 .name = vmxnet3_driver_name,
4410 .id_table = vmxnet3_pciid_table,
4411 .probe = vmxnet3_probe_device,
4412 .remove = vmxnet3_remove_device,
4413 .shutdown = vmxnet3_shutdown_device,
4415 .driver.pm = &vmxnet3_pm_ops,
4421 vmxnet3_init_module(void)
4423 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4424 VMXNET3_DRIVER_VERSION_REPORT);
4425 return pci_register_driver(&vmxnet3_driver);
4428 module_init(vmxnet3_init_module);
4432 vmxnet3_exit_module(void)
4434 pci_unregister_driver(&vmxnet3_driver);
4437 module_exit(vmxnet3_exit_module);
4439 MODULE_AUTHOR("VMware, Inc.");
4440 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4441 MODULE_LICENSE("GPL v2");
4442 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);