Merge tag 'wireless-drivers-for-davem-2017-04-03' of git://git.kernel.org/pub/scm...
[linux-2.6-block.git] / drivers / net / ethernet / intel / i40evf / i40evf_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4  * Copyright(c) 2013 - 2016 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 #include "i40evf.h"
28 #include "i40e_prototype.h"
29 #include "i40evf_client.h"
/* Forward declarations for routines defined later in this file */
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);

/* Driver identification strings; i40evf_driver_name is non-static because
 * other translation units of the driver reference it.
 */
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
        "Intel(R) 40-10 Gigabit Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 7
/* DRV_VERSION expands to "major.minor.build-k" via token stringification */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD) \
             DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2015 Intel Corporation.";

/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* driver-private workqueue, allocated at module init */
static struct workqueue_struct *i40evf_wq;
75
76 /**
77  * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
78  * @hw:   pointer to the HW structure
79  * @mem:  ptr to mem struct to fill out
80  * @size: size of memory requested
81  * @alignment: what to align the allocation to
82  **/
83 i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
84                                       struct i40e_dma_mem *mem,
85                                       u64 size, u32 alignment)
86 {
87         struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
88
89         if (!mem)
90                 return I40E_ERR_PARAM;
91
92         mem->size = ALIGN(size, alignment);
93         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
94                                      (dma_addr_t *)&mem->pa, GFP_KERNEL);
95         if (mem->va)
96                 return 0;
97         else
98                 return I40E_ERR_NO_MEMORY;
99 }
100
101 /**
102  * i40evf_free_dma_mem_d - OS specific memory free for shared code
103  * @hw:   pointer to the HW structure
104  * @mem:  ptr to mem struct to free
105  **/
106 i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
107 {
108         struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
109
110         if (!mem || !mem->va)
111                 return I40E_ERR_PARAM;
112         dma_free_coherent(&adapter->pdev->dev, mem->size,
113                           mem->va, (dma_addr_t)mem->pa);
114         return 0;
115 }
116
117 /**
118  * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
119  * @hw:   pointer to the HW structure
120  * @mem:  ptr to mem struct to fill out
121  * @size: size of memory requested
122  **/
123 i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
124                                        struct i40e_virt_mem *mem, u32 size)
125 {
126         if (!mem)
127                 return I40E_ERR_PARAM;
128
129         mem->size = size;
130         mem->va = kzalloc(size, GFP_KERNEL);
131
132         if (mem->va)
133                 return 0;
134         else
135                 return I40E_ERR_NO_MEMORY;
136 }
137
138 /**
139  * i40evf_free_virt_mem_d - OS specific memory free for shared code
140  * @hw:   pointer to the HW structure
141  * @mem:  ptr to mem struct to free
142  **/
143 i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
144                                    struct i40e_virt_mem *mem)
145 {
146         if (!mem)
147                 return I40E_ERR_PARAM;
148
149         /* it's ok to kfree a NULL pointer */
150         kfree(mem->va);
151
152         return 0;
153 }
154
155 /**
156  * i40evf_debug_d - OS dependent version of debug printing
157  * @hw:  pointer to the HW structure
158  * @mask: debug level mask
159  * @fmt_str: printf-type format description
160  **/
161 void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
162 {
163         char buf[512];
164         va_list argptr;
165
166         if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
167                 return;
168
169         va_start(argptr, fmt_str);
170         vsnprintf(buf, sizeof(buf), fmt_str, argptr);
171         va_end(argptr);
172
173         /* the debug string is already formatted with a newline */
174         pr_info("%s", buf);
175 }
176
177 /**
178  * i40evf_schedule_reset - Set the flags and schedule a reset event
179  * @adapter: board private structure
180  **/
181 void i40evf_schedule_reset(struct i40evf_adapter *adapter)
182 {
183         if (!(adapter->flags &
184               (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
185                 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
186                 schedule_work(&adapter->reset_task);
187         }
188 }
189
190 /**
191  * i40evf_tx_timeout - Respond to a Tx Hang
192  * @netdev: network interface device structure
193  **/
194 static void i40evf_tx_timeout(struct net_device *netdev)
195 {
196         struct i40evf_adapter *adapter = netdev_priv(netdev);
197
198         adapter->tx_timeout_count++;
199         i40evf_schedule_reset(adapter);
200 }
201
202 /**
203  * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
204  * @adapter: board private structure
205  **/
206 static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
207 {
208         struct i40e_hw *hw = &adapter->hw;
209
210         if (!adapter->msix_entries)
211                 return;
212
213         wr32(hw, I40E_VFINT_DYN_CTL01, 0);
214
215         /* read flush */
216         rd32(hw, I40E_VFGEN_RSTAT);
217
218         synchronize_irq(adapter->msix_entries[0].vector);
219 }
220
221 /**
222  * i40evf_misc_irq_enable - Enable default interrupt generation settings
223  * @adapter: board private structure
224  **/
225 static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
226 {
227         struct i40e_hw *hw = &adapter->hw;
228
229         wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
230                                        I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
231         wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
232
233         /* read flush */
234         rd32(hw, I40E_VFGEN_RSTAT);
235 }
236
237 /**
238  * i40evf_irq_disable - Mask off interrupt generation on the NIC
239  * @adapter: board private structure
240  **/
241 static void i40evf_irq_disable(struct i40evf_adapter *adapter)
242 {
243         int i;
244         struct i40e_hw *hw = &adapter->hw;
245
246         if (!adapter->msix_entries)
247                 return;
248
249         for (i = 1; i < adapter->num_msix_vectors; i++) {
250                 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
251                 synchronize_irq(adapter->msix_entries[i].vector);
252         }
253         /* read flush */
254         rd32(hw, I40E_VFGEN_RSTAT);
255 }
256
257 /**
258  * i40evf_irq_enable_queues - Enable interrupt for specified queues
259  * @adapter: board private structure
260  * @mask: bitmap of queues to enable
261  **/
262 void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
263 {
264         struct i40e_hw *hw = &adapter->hw;
265         int i;
266
267         for (i = 1; i < adapter->num_msix_vectors; i++) {
268                 if (mask & BIT(i - 1)) {
269                         wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
270                              I40E_VFINT_DYN_CTLN1_INTENA_MASK |
271                              I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
272                              I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
273                 }
274         }
275 }
276
277 /**
278  * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
279  * @adapter: board private structure
280  * @mask: bitmap of vectors to trigger
281  **/
282 static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
283 {
284         struct i40e_hw *hw = &adapter->hw;
285         int i;
286         u32 dyn_ctl;
287
288         if (mask & 1) {
289                 dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
290                 dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
291                            I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
292                            I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
293                 wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
294         }
295         for (i = 1; i < adapter->num_msix_vectors; i++) {
296                 if (mask & BIT(i)) {
297                         dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
298                         dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
299                                    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
300                                    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
301                         wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
302                 }
303         }
304 }
305
306 /**
307  * i40evf_irq_enable - Enable default interrupt generation settings
308  * @adapter: board private structure
309  * @flush: boolean value whether to run rd32()
310  **/
311 void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
312 {
313         struct i40e_hw *hw = &adapter->hw;
314
315         i40evf_misc_irq_enable(adapter);
316         i40evf_irq_enable_queues(adapter, ~0);
317
318         if (flush)
319                 rd32(hw, I40E_VFGEN_RSTAT);
320 }
321
322 /**
323  * i40evf_msix_aq - Interrupt handler for vector 0
324  * @irq: interrupt number
325  * @data: pointer to netdev
326  **/
327 static irqreturn_t i40evf_msix_aq(int irq, void *data)
328 {
329         struct net_device *netdev = data;
330         struct i40evf_adapter *adapter = netdev_priv(netdev);
331         struct i40e_hw *hw = &adapter->hw;
332         u32 val;
333
334         /* handle non-queue interrupts, these reads clear the registers */
335         val = rd32(hw, I40E_VFINT_ICR01);
336         val = rd32(hw, I40E_VFINT_ICR0_ENA1);
337
338         val = rd32(hw, I40E_VFINT_DYN_CTL01) |
339               I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
340         wr32(hw, I40E_VFINT_DYN_CTL01, val);
341
342         /* schedule work on the private workqueue */
343         schedule_work(&adapter->adminq_task);
344
345         return IRQ_HANDLED;
346 }
347
348 /**
349  * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
350  * @irq: interrupt number
351  * @data: pointer to a q_vector
352  **/
353 static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
354 {
355         struct i40e_q_vector *q_vector = data;
356
357         if (!q_vector->tx.ring && !q_vector->rx.ring)
358                 return IRQ_HANDLED;
359
360         napi_schedule_irqoff(&q_vector->napi);
361
362         return IRQ_HANDLED;
363 }
364
365 /**
366  * i40evf_map_vector_to_rxq - associate irqs with rx queues
367  * @adapter: board private structure
368  * @v_idx: interrupt number
369  * @r_idx: queue number
370  **/
371 static void
372 i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
373 {
374         struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
375         struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
376         struct i40e_hw *hw = &adapter->hw;
377
378         rx_ring->q_vector = q_vector;
379         rx_ring->next = q_vector->rx.ring;
380         rx_ring->vsi = &adapter->vsi;
381         q_vector->rx.ring = rx_ring;
382         q_vector->rx.count++;
383         q_vector->rx.latency_range = I40E_LOW_LATENCY;
384         q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
385         q_vector->ring_mask |= BIT(r_idx);
386         q_vector->itr_countdown = ITR_COUNTDOWN_START;
387         wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
388 }
389
390 /**
391  * i40evf_map_vector_to_txq - associate irqs with tx queues
392  * @adapter: board private structure
393  * @v_idx: interrupt number
394  * @t_idx: queue number
395  **/
396 static void
397 i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
398 {
399         struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
400         struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
401         struct i40e_hw *hw = &adapter->hw;
402
403         tx_ring->q_vector = q_vector;
404         tx_ring->next = q_vector->tx.ring;
405         tx_ring->vsi = &adapter->vsi;
406         q_vector->tx.ring = tx_ring;
407         q_vector->tx.count++;
408         q_vector->tx.latency_range = I40E_LOW_LATENCY;
409         q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
410         q_vector->itr_countdown = ITR_COUNTDOWN_START;
411         q_vector->num_ringpairs++;
412         wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
413 }
414
415 /**
416  * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
417  * @adapter: board private structure to initialize
418  *
419  * This function maps descriptor rings to the queue-specific vectors
420  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
421  * one vector per ring/queue, but on a constrained vector budget, we
422  * group the rings as "efficiently" as possible.  You would add new
423  * mapping configurations in here.
424  **/
425 static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
426 {
427         int q_vectors;
428         int v_start = 0;
429         int rxr_idx = 0, txr_idx = 0;
430         int rxr_remaining = adapter->num_active_queues;
431         int txr_remaining = adapter->num_active_queues;
432         int i, j;
433         int rqpv, tqpv;
434         int err = 0;
435
436         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
437
438         /* The ideal configuration...
439          * We have enough vectors to map one per queue.
440          */
441         if (q_vectors >= (rxr_remaining * 2)) {
442                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
443                         i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
444
445                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
446                         i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
447                 goto out;
448         }
449
450         /* If we don't have enough vectors for a 1-to-1
451          * mapping, we'll have to group them so there are
452          * multiple queues per vector.
453          * Re-adjusting *qpv takes care of the remainder.
454          */
455         for (i = v_start; i < q_vectors; i++) {
456                 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
457                 for (j = 0; j < rqpv; j++) {
458                         i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
459                         rxr_idx++;
460                         rxr_remaining--;
461                 }
462         }
463         for (i = v_start; i < q_vectors; i++) {
464                 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
465                 for (j = 0; j < tqpv; j++) {
466                         i40evf_map_vector_to_txq(adapter, i, txr_idx);
467                         txr_idx++;
468                         txr_remaining--;
469                 }
470         }
471
472 out:
473         adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
474
475         return err;
476 }
477
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        int i;

        /* if interface is down do nothing */
        if (test_bit(__I40E_DOWN, &adapter->vsi.state))
                return;

        /* run every traffic vector's handler directly; irq number (0) is
         * unused by i40evf_msix_clean_rings
         */
        for (i = 0; i < q_vectors; i++)
                i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
501 /**
502  * i40evf_irq_affinity_notify - Callback for affinity changes
503  * @notify: context as to what irq was changed
504  * @mask: the new affinity mask
505  *
506  * This is a callback function used by the irq_set_affinity_notifier function
507  * so that we may register to receive changes to the irq affinity masks.
508  **/
509 static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
510                                        const cpumask_t *mask)
511 {
512         struct i40e_q_vector *q_vector =
513                 container_of(notify, struct i40e_q_vector, affinity_notify);
514
515         q_vector->affinity_mask = *mask;
516 }
517
518 /**
519  * i40evf_irq_affinity_release - Callback for affinity notifier release
520  * @ref: internal core kernel usage
521  *
522  * This is a callback function used by the irq_set_affinity_notifier function
523  * to inform the current notification subscriber that they will no longer
524  * receive notifications.
525  **/
526 static void i40evf_irq_affinity_release(struct kref *ref) {}
527
528 /**
529  * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
530  * @adapter: board private structure
531  *
532  * Allocates MSI-X vectors for tx and rx handling, and requests
533  * interrupts from the kernel.
534  **/
535 static int
536 i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
537 {
538         int vector, err, q_vectors;
539         int rx_int_idx = 0, tx_int_idx = 0;
540         int irq_num;
541
542         i40evf_irq_disable(adapter);
543         /* Decrement for Other and TCP Timer vectors */
544         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
545
546         for (vector = 0; vector < q_vectors; vector++) {
547                 struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
548                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
549
550                 if (q_vector->tx.ring && q_vector->rx.ring) {
551                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
552                                  "i40evf-%s-%s-%d", basename,
553                                  "TxRx", rx_int_idx++);
554                         tx_int_idx++;
555                 } else if (q_vector->rx.ring) {
556                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
557                                  "i40evf-%s-%s-%d", basename,
558                                  "rx", rx_int_idx++);
559                 } else if (q_vector->tx.ring) {
560                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
561                                  "i40evf-%s-%s-%d", basename,
562                                  "tx", tx_int_idx++);
563                 } else {
564                         /* skip this unused q_vector */
565                         continue;
566                 }
567                 err = request_irq(irq_num,
568                                   i40evf_msix_clean_rings,
569                                   0,
570                                   q_vector->name,
571                                   q_vector);
572                 if (err) {
573                         dev_info(&adapter->pdev->dev,
574                                  "Request_irq failed, error: %d\n", err);
575                         goto free_queue_irqs;
576                 }
577                 /* register for affinity change notifications */
578                 q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
579                 q_vector->affinity_notify.release =
580                                                    i40evf_irq_affinity_release;
581                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
582                 /* assign the mask for this irq */
583                 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
584         }
585
586         return 0;
587
588 free_queue_irqs:
589         while (vector) {
590                 vector--;
591                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
592                 irq_set_affinity_notifier(irq_num, NULL);
593                 irq_set_affinity_hint(irq_num, NULL);
594                 free_irq(irq_num, &adapter->q_vectors[vector]);
595         }
596         return err;
597 }
598
599 /**
600  * i40evf_request_misc_irq - Initialize MSI-X interrupts
601  * @adapter: board private structure
602  *
603  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
604  * vector is only for the admin queue, and stays active even when the netdev
605  * is closed.
606  **/
607 static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
608 {
609         struct net_device *netdev = adapter->netdev;
610         int err;
611
612         snprintf(adapter->misc_vector_name,
613                  sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
614                  dev_name(&adapter->pdev->dev));
615         err = request_irq(adapter->msix_entries[0].vector,
616                           &i40evf_msix_aq, 0,
617                           adapter->misc_vector_name, netdev);
618         if (err) {
619                 dev_err(&adapter->pdev->dev,
620                         "request_irq for %s failed: %d\n",
621                         adapter->misc_vector_name, err);
622                 free_irq(adapter->msix_entries[0].vector, netdev);
623         }
624         return err;
625 }
626
627 /**
628  * i40evf_free_traffic_irqs - Free MSI-X interrupts
629  * @adapter: board private structure
630  *
631  * Frees all MSI-X vectors other than 0.
632  **/
633 static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
634 {
635         int vector, irq_num, q_vectors;
636
637         if (!adapter->msix_entries)
638                 return;
639
640         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
641
642         for (vector = 0; vector < q_vectors; vector++) {
643                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
644                 irq_set_affinity_notifier(irq_num, NULL);
645                 irq_set_affinity_hint(irq_num, NULL);
646                 free_irq(irq_num, &adapter->q_vectors[vector]);
647         }
648 }
649
650 /**
651  * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
652  * @adapter: board private structure
653  *
654  * Frees MSI-X vector 0.
655  **/
656 static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
657 {
658         struct net_device *netdev = adapter->netdev;
659
660         if (!adapter->msix_entries)
661                 return;
662
663         free_irq(adapter->msix_entries[0].vector, netdev);
664 }
665
666 /**
667  * i40evf_configure_tx - Configure Transmit Unit after Reset
668  * @adapter: board private structure
669  *
670  * Configure the Tx unit of the MAC after a reset.
671  **/
672 static void i40evf_configure_tx(struct i40evf_adapter *adapter)
673 {
674         struct i40e_hw *hw = &adapter->hw;
675         int i;
676
677         for (i = 0; i < adapter->num_active_queues; i++)
678                 adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
679 }
680
681 /**
682  * i40evf_configure_rx - Configure Receive Unit after Reset
683  * @adapter: board private structure
684  *
685  * Configure the Rx unit of the MAC after a reset.
686  **/
687 static void i40evf_configure_rx(struct i40evf_adapter *adapter)
688 {
689         unsigned int rx_buf_len = I40E_RXBUFFER_2048;
690         struct net_device *netdev = adapter->netdev;
691         struct i40e_hw *hw = &adapter->hw;
692         int i;
693
694         /* Legacy Rx will always default to a 2048 buffer size. */
695 #if (PAGE_SIZE < 8192)
696         if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
697                 /* We use a 1536 buffer size for configurations with
698                  * standard Ethernet mtu.  On x86 this gives us enough room
699                  * for shared info and 192 bytes of padding.
700                  */
701                 if (netdev->mtu <= ETH_DATA_LEN)
702                         rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
703         }
704 #endif
705
706         for (i = 0; i < adapter->num_active_queues; i++) {
707                 adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
708                 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
709         }
710 }
711
712 /**
713  * i40evf_find_vlan - Search filter list for specific vlan filter
714  * @adapter: board private structure
715  * @vlan: vlan tag
716  *
717  * Returns ptr to the filter object or NULL
718  **/
719 static struct
720 i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
721 {
722         struct i40evf_vlan_filter *f;
723
724         list_for_each_entry(f, &adapter->vlan_filter_list, list) {
725                 if (vlan == f->vlan)
726                         return f;
727         }
728         return NULL;
729 }
730
731 /**
732  * i40evf_add_vlan - Add a vlan filter to the list
733  * @adapter: board private structure
734  * @vlan: VLAN tag
735  *
736  * Returns ptr to the filter object or NULL when no memory available.
737  **/
738 static struct
739 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
740 {
741         struct i40evf_vlan_filter *f = NULL;
742         int count = 50;
743
744         while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
745                                 &adapter->crit_section)) {
746                 udelay(1);
747                 if (--count == 0)
748                         goto out;
749         }
750
751         f = i40evf_find_vlan(adapter, vlan);
752         if (!f) {
753                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
754                 if (!f)
755                         goto clearout;
756
757                 f->vlan = vlan;
758
759                 INIT_LIST_HEAD(&f->list);
760                 list_add(&f->list, &adapter->vlan_filter_list);
761                 f->add = true;
762                 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
763         }
764
765 clearout:
766         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
767 out:
768         return f;
769 }
770
771 /**
772  * i40evf_del_vlan - Remove a vlan filter from the list
773  * @adapter: board private structure
774  * @vlan: VLAN tag
775  **/
776 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
777 {
778         struct i40evf_vlan_filter *f;
779         int count = 50;
780
781         while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
782                                 &adapter->crit_section)) {
783                 udelay(1);
784                 if (--count == 0)
785                         return;
786         }
787
788         f = i40evf_find_vlan(adapter, vlan);
789         if (f) {
790                 f->remove = true;
791                 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
792         }
793         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
794 }
795
796 /**
797  * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
798  * @netdev: network device struct
799  * @vid: VLAN tag
800  **/
801 static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
802                                   __always_unused __be16 proto, u16 vid)
803 {
804         struct i40evf_adapter *adapter = netdev_priv(netdev);
805
806         if (!VLAN_ALLOWED(adapter))
807                 return -EIO;
808         if (i40evf_add_vlan(adapter, vid) == NULL)
809                 return -ENOMEM;
810         return 0;
811 }
812
813 /**
814  * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
815  * @netdev: network device struct
816  * @vid: VLAN tag
817  **/
818 static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
819                                    __always_unused __be16 proto, u16 vid)
820 {
821         struct i40evf_adapter *adapter = netdev_priv(netdev);
822
823         if (VLAN_ALLOWED(adapter)) {
824                 i40evf_del_vlan(adapter, vid);
825                 return 0;
826         }
827         return -EIO;
828 }
829
830 /**
831  * i40evf_find_filter - Search filter list for specific mac filter
832  * @adapter: board private structure
833  * @macaddr: the MAC address
834  *
835  * Returns ptr to the filter object or NULL
836  **/
837 static struct
838 i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
839                                       u8 *macaddr)
840 {
841         struct i40evf_mac_filter *f;
842
843         if (!macaddr)
844                 return NULL;
845
846         list_for_each_entry(f, &adapter->mac_filter_list, list) {
847                 if (ether_addr_equal(macaddr, f->macaddr))
848                         return f;
849         }
850         return NULL;
851 }
852
853 /**
854  * i40e_add_filter - Add a mac filter to the filter list
855  * @adapter: board private structure
856  * @macaddr: the MAC address
857  *
858  * Returns ptr to the filter object or NULL when no memory available.
859  **/
860 static struct
861 i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
862                                      u8 *macaddr)
863 {
864         struct i40evf_mac_filter *f;
865         int count = 50;
866
867         if (!macaddr)
868                 return NULL;
869
870         while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
871                                 &adapter->crit_section)) {
872                 udelay(1);
873                 if (--count == 0)
874                         return NULL;
875         }
876
877         f = i40evf_find_filter(adapter, macaddr);
878         if (!f) {
879                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
880                 if (!f) {
881                         clear_bit(__I40EVF_IN_CRITICAL_TASK,
882                                   &adapter->crit_section);
883                         return NULL;
884                 }
885
886                 ether_addr_copy(f->macaddr, macaddr);
887
888                 list_add_tail(&f->list, &adapter->mac_filter_list);
889                 f->add = true;
890                 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
891         }
892
893         clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
894         return f;
895 }
896
897 /**
898  * i40evf_set_mac - NDO callback to set port mac address
899  * @netdev: network interface device structure
900  * @p: pointer to an address structure
901  *
902  * Returns 0 on success, negative on failure
903  **/
904 static int i40evf_set_mac(struct net_device *netdev, void *p)
905 {
906         struct i40evf_adapter *adapter = netdev_priv(netdev);
907         struct i40e_hw *hw = &adapter->hw;
908         struct i40evf_mac_filter *f;
909         struct sockaddr *addr = p;
910
911         if (!is_valid_ether_addr(addr->sa_data))
912                 return -EADDRNOTAVAIL;
913
914         if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
915                 return 0;
916
917         if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
918                 return -EPERM;
919
920         f = i40evf_find_filter(adapter, hw->mac.addr);
921         if (f) {
922                 f->remove = true;
923                 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
924         }
925
926         f = i40evf_add_filter(adapter, addr->sa_data);
927         if (f) {
928                 ether_addr_copy(hw->mac.addr, addr->sa_data);
929                 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
930         }
931
932         return (f == NULL) ? -ENOMEM : 0;
933 }
934
/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 *
 * Synchronizes the driver's MAC filter list with the netdev's unicast and
 * multicast address lists, and mirrors IFF_PROMISC/IFF_ALLMULTI transitions
 * into admin-queue request flags serviced later by the watchdog task.
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;
	int count = 50;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		i40evf_add_filter(adapter, uca->addr);
	}
	netdev_for_each_mc_addr(mca, netdev) {
		i40evf_add_filter(adapter, mca->addr);
	}

	/* NOTE(review): ndo_set_rx_mode appears to run in atomic context
	 * here, hence the udelay() busy-wait instead of sleeping; bail out
	 * after ~50us if the critical section cannot be taken.
	 */
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0) {
			dev_err(&adapter->pdev->dev,
				"Failed to get lock in %s\n", __func__);
			return;
		}
	}
	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* never drop the filter for our own hardware address */
		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
			goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;

bottom_of_search_loop:
		continue;
	}

	/* request/release promiscuous mode only on an actual transition */
	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	/* likewise for all-multicast mode */
	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
1006
1007 /**
1008  * i40evf_napi_enable_all - enable NAPI on all queue vectors
1009  * @adapter: board private structure
1010  **/
1011 static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
1012 {
1013         int q_idx;
1014         struct i40e_q_vector *q_vector;
1015         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1016
1017         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1018                 struct napi_struct *napi;
1019
1020                 q_vector = &adapter->q_vectors[q_idx];
1021                 napi = &q_vector->napi;
1022                 napi_enable(napi);
1023         }
1024 }
1025
1026 /**
1027  * i40evf_napi_disable_all - disable NAPI on all queue vectors
1028  * @adapter: board private structure
1029  **/
1030 static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
1031 {
1032         int q_idx;
1033         struct i40e_q_vector *q_vector;
1034         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1035
1036         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1037                 q_vector = &adapter->q_vectors[q_idx];
1038                 napi_disable(&q_vector->napi);
1039         }
1040 }
1041
1042 /**
1043  * i40evf_configure - set up transmit and receive data structures
1044  * @adapter: board private structure
1045  **/
1046 static void i40evf_configure(struct i40evf_adapter *adapter)
1047 {
1048         struct net_device *netdev = adapter->netdev;
1049         int i;
1050
1051         i40evf_set_rx_mode(netdev);
1052
1053         i40evf_configure_tx(adapter);
1054         i40evf_configure_rx(adapter);
1055         adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
1056
1057         for (i = 0; i < adapter->num_active_queues; i++) {
1058                 struct i40e_ring *ring = &adapter->rx_rings[i];
1059
1060                 i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
1061         }
1062 }
1063
/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Marks the adapter running, enables NAPI, and schedules the watchdog,
 * which will send the queue-enable request to the PF.
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	/* transition state before enabling NAPI so pollers see RUNNING */
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_DOWN, &adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	/* the watchdog task services this flag by asking the PF to
	 * enable the queues
	 */
	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
	/* kick the watchdog so the AQ request goes out promptly */
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
1080
/**
 * i40evf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Stops the data path (carrier, Tx, NAPI, IRQs), flags every MAC and VLAN
 * filter for removal, and queues the teardown admin-queue requests for the
 * watchdog to send.
 **/
void i40evf_down(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct i40evf_mac_filter *f;

	/* nothing to do if we are already down or heading there */
	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;

	/* process context: sleep while waiting for the critical section */
	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	/* quiesce the stack and interrupt paths before touching filters */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	i40evf_napi_disable_all(adapter);
	i40evf_irq_disable(adapter);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}
	/* remove all VLAN filters
	 * NOTE(review): this walks the VLAN list through a mac-filter
	 * pointer; it relies on i40evf_vlan_filter having 'list' and
	 * 'remove' at matching offsets — confirm against i40evf.h.
	 */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		f->remove = true;
	}
	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
1126
1127 /**
1128  * i40evf_acquire_msix_vectors - Setup the MSIX capability
1129  * @adapter: board private structure
1130  * @vectors: number of vectors to request
1131  *
1132  * Work with the OS to set up the MSIX vectors needed.
1133  *
1134  * Returns 0 on success, negative on failure
1135  **/
1136 static int
1137 i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
1138 {
1139         int err, vector_threshold;
1140
1141         /* We'll want at least 3 (vector_threshold):
1142          * 0) Other (Admin Queue and link, mostly)
1143          * 1) TxQ[0] Cleanup
1144          * 2) RxQ[0] Cleanup
1145          */
1146         vector_threshold = MIN_MSIX_COUNT;
1147
1148         /* The more we get, the more we will assign to Tx/Rx Cleanup
1149          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1150          * Right now, we simply care about how many we'll get; we'll
1151          * set them up later while requesting irq's.
1152          */
1153         err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1154                                     vector_threshold, vectors);
1155         if (err < 0) {
1156                 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1157                 kfree(adapter->msix_entries);
1158                 adapter->msix_entries = NULL;
1159                 return err;
1160         }
1161
1162         /* Adjust for only the vectors we'll use, which is minimum
1163          * of max_msix_q_vectors + NONQ_VECS, or the number of
1164          * vectors we were allocated.
1165          */
1166         adapter->num_msix_vectors = err;
1167         return 0;
1168 }
1169
1170 /**
1171  * i40evf_free_queues - Free memory for all rings
1172  * @adapter: board private structure to initialize
1173  *
1174  * Free all of the memory associated with queue pairs.
1175  **/
1176 static void i40evf_free_queues(struct i40evf_adapter *adapter)
1177 {
1178         if (!adapter->vsi_res)
1179                 return;
1180         kfree(adapter->tx_rings);
1181         adapter->tx_rings = NULL;
1182         kfree(adapter->rx_rings);
1183         adapter->rx_rings = NULL;
1184 }
1185
1186 /**
1187  * i40evf_alloc_queues - Allocate memory for all rings
1188  * @adapter: board private structure to initialize
1189  *
1190  * We allocate one ring per queue at run-time since we don't know the
1191  * number of queues at compile-time.  The polling_netdev array is
1192  * intended for Multiqueue, but should work fine with a single queue.
1193  **/
1194 static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
1195 {
1196         int i;
1197
1198         adapter->tx_rings = kcalloc(adapter->num_active_queues,
1199                                     sizeof(struct i40e_ring), GFP_KERNEL);
1200         if (!adapter->tx_rings)
1201                 goto err_out;
1202         adapter->rx_rings = kcalloc(adapter->num_active_queues,
1203                                     sizeof(struct i40e_ring), GFP_KERNEL);
1204         if (!adapter->rx_rings)
1205                 goto err_out;
1206
1207         for (i = 0; i < adapter->num_active_queues; i++) {
1208                 struct i40e_ring *tx_ring;
1209                 struct i40e_ring *rx_ring;
1210
1211                 tx_ring = &adapter->tx_rings[i];
1212
1213                 tx_ring->queue_index = i;
1214                 tx_ring->netdev = adapter->netdev;
1215                 tx_ring->dev = &adapter->pdev->dev;
1216                 tx_ring->count = adapter->tx_desc_count;
1217                 tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
1218                 if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
1219                         tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
1220
1221                 rx_ring = &adapter->rx_rings[i];
1222                 rx_ring->queue_index = i;
1223                 rx_ring->netdev = adapter->netdev;
1224                 rx_ring->dev = &adapter->pdev->dev;
1225                 rx_ring->count = adapter->rx_desc_count;
1226                 rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
1227         }
1228
1229         return 0;
1230
1231 err_out:
1232         i40evf_free_queues(adapter);
1233         return -ENOMEM;
1234 }
1235
1236 /**
1237  * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
1238  * @adapter: board private structure to initialize
1239  *
1240  * Attempt to configure the interrupts using the best available
1241  * capabilities of the hardware and the kernel.
1242  **/
1243 static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1244 {
1245         int vector, v_budget;
1246         int pairs = 0;
1247         int err = 0;
1248
1249         if (!adapter->vsi_res) {
1250                 err = -EIO;
1251                 goto out;
1252         }
1253         pairs = adapter->num_active_queues;
1254
1255         /* It's easy to be greedy for MSI-X vectors, but it really
1256          * doesn't do us much good if we have a lot more vectors
1257          * than CPU's.  So let's be conservative and only ask for
1258          * (roughly) twice the number of vectors as there are CPU's.
1259          */
1260         v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1261         v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1262
1263         adapter->msix_entries = kcalloc(v_budget,
1264                                         sizeof(struct msix_entry), GFP_KERNEL);
1265         if (!adapter->msix_entries) {
1266                 err = -ENOMEM;
1267                 goto out;
1268         }
1269
1270         for (vector = 0; vector < v_budget; vector++)
1271                 adapter->msix_entries[vector].entry = vector;
1272
1273         err = i40evf_acquire_msix_vectors(adapter, v_budget);
1274
1275 out:
1276         netif_set_real_num_rx_queues(adapter->netdev, pairs);
1277         netif_set_real_num_tx_queues(adapter->netdev, pairs);
1278         return err;
1279 }
1280
1281 /**
1282  * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
1283  * @adapter: board private structure
1284  *
1285  * Return 0 on success, negative on failure
1286  **/
1287 static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
1288 {
1289         struct i40e_aqc_get_set_rss_key_data *rss_key =
1290                 (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
1291         struct i40e_hw *hw = &adapter->hw;
1292         int ret = 0;
1293
1294         if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
1295                 /* bail because we already have a command pending */
1296                 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1297                         adapter->current_op);
1298                 return -EBUSY;
1299         }
1300
1301         ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1302         if (ret) {
1303                 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1304                         i40evf_stat_str(hw, ret),
1305                         i40evf_aq_str(hw, hw->aq.asq_last_status));
1306                 return ret;
1307
1308         }
1309
1310         ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1311                                     adapter->rss_lut, adapter->rss_lut_size);
1312         if (ret) {
1313                 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1314                         i40evf_stat_str(hw, ret),
1315                         i40evf_aq_str(hw, hw->aq.asq_last_status));
1316         }
1317
1318         return ret;
1319
1320 }
1321
1322 /**
1323  * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
1324  * @adapter: board private structure
1325  *
1326  * Returns 0 on success, negative on failure
1327  **/
1328 static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
1329 {
1330         struct i40e_hw *hw = &adapter->hw;
1331         u32 *dw;
1332         u16 i;
1333
1334         dw = (u32 *)adapter->rss_key;
1335         for (i = 0; i <= adapter->rss_key_size / 4; i++)
1336                 wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
1337
1338         dw = (u32 *)adapter->rss_lut;
1339         for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1340                 wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
1341
1342         i40e_flush(hw);
1343
1344         return 0;
1345 }
1346
1347 /**
1348  * i40evf_config_rss - Configure RSS keys and lut
1349  * @adapter: board private structure
1350  *
1351  * Returns 0 on success, negative on failure
1352  **/
1353 int i40evf_config_rss(struct i40evf_adapter *adapter)
1354 {
1355
1356         if (RSS_PF(adapter)) {
1357                 adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
1358                                         I40EVF_FLAG_AQ_SET_RSS_KEY;
1359                 return 0;
1360         } else if (RSS_AQ(adapter)) {
1361                 return i40evf_config_rss_aq(adapter);
1362         } else {
1363                 return i40evf_config_rss_reg(adapter);
1364         }
1365 }
1366
1367 /**
1368  * i40evf_fill_rss_lut - Fill the lut with default values
1369  * @adapter: board private structure
1370  **/
1371 static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
1372 {
1373         u16 i;
1374
1375         for (i = 0; i < adapter->rss_lut_size; i++)
1376                 adapter->rss_lut[i] = i % adapter->num_active_queues;
1377 }
1378
1379 /**
1380  * i40evf_init_rss - Prepare for RSS
1381  * @adapter: board private structure
1382  *
1383  * Return 0 on success, negative on failure
1384  **/
1385 static int i40evf_init_rss(struct i40evf_adapter *adapter)
1386 {
1387         struct i40e_hw *hw = &adapter->hw;
1388         int ret;
1389
1390         if (!RSS_PF(adapter)) {
1391                 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1392                 if (adapter->vf_res->vf_offload_flags &
1393                     I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1394                         adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
1395                 else
1396                         adapter->hena = I40E_DEFAULT_RSS_HENA;
1397
1398                 wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
1399                 wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1400         }
1401
1402         i40evf_fill_rss_lut(adapter);
1403
1404         netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1405         ret = i40evf_config_rss(adapter);
1406
1407         return ret;
1408 }
1409
1410 /**
1411  * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
1412  * @adapter: board private structure to initialize
1413  *
1414  * We allocate one q_vector per queue interrupt.  If allocation fails we
1415  * return -ENOMEM.
1416  **/
1417 static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
1418 {
1419         int q_idx = 0, num_q_vectors;
1420         struct i40e_q_vector *q_vector;
1421
1422         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1423         adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1424                                      GFP_KERNEL);
1425         if (!adapter->q_vectors)
1426                 return -ENOMEM;
1427
1428         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1429                 q_vector = &adapter->q_vectors[q_idx];
1430                 q_vector->adapter = adapter;
1431                 q_vector->vsi = &adapter->vsi;
1432                 q_vector->v_idx = q_idx;
1433                 netif_napi_add(adapter->netdev, &q_vector->napi,
1434                                i40evf_napi_poll, NAPI_POLL_WEIGHT);
1435         }
1436
1437         return 0;
1438 }
1439
1440 /**
1441  * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
1442  * @adapter: board private structure to initialize
1443  *
1444  * This function frees the memory allocated to the q_vectors.  In addition if
1445  * NAPI is enabled it will delete any references to the NAPI struct prior
1446  * to freeing the q_vector.
1447  **/
1448 static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
1449 {
1450         int q_idx, num_q_vectors;
1451         int napi_vectors;
1452
1453         if (!adapter->q_vectors)
1454                 return;
1455
1456         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1457         napi_vectors = adapter->num_active_queues;
1458
1459         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1460                 struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
1461                 if (q_idx < napi_vectors)
1462                         netif_napi_del(&q_vector->napi);
1463         }
1464         kfree(adapter->q_vectors);
1465         adapter->q_vectors = NULL;
1466 }
1467
1468 /**
1469  * i40evf_reset_interrupt_capability - Reset MSIX setup
1470  * @adapter: board private structure
1471  *
1472  **/
1473 void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
1474 {
1475         if (!adapter->msix_entries)
1476                 return;
1477
1478         pci_disable_msix(adapter->pdev);
1479         kfree(adapter->msix_entries);
1480         adapter->msix_entries = NULL;
1481 }
1482
1483 /**
1484  * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
1485  * @adapter: board private structure to initialize
1486  *
1487  **/
1488 int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
1489 {
1490         int err;
1491
1492         rtnl_lock();
1493         err = i40evf_set_interrupt_capability(adapter);
1494         rtnl_unlock();
1495         if (err) {
1496                 dev_err(&adapter->pdev->dev,
1497                         "Unable to setup interrupt capabilities\n");
1498                 goto err_set_interrupt;
1499         }
1500
1501         err = i40evf_alloc_q_vectors(adapter);
1502         if (err) {
1503                 dev_err(&adapter->pdev->dev,
1504                         "Unable to allocate memory for queue vectors\n");
1505                 goto err_alloc_q_vectors;
1506         }
1507
1508         err = i40evf_alloc_queues(adapter);
1509         if (err) {
1510                 dev_err(&adapter->pdev->dev,
1511                         "Unable to allocate memory for queues\n");
1512                 goto err_alloc_queues;
1513         }
1514
1515         dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1516                  (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1517                  adapter->num_active_queues);
1518
1519         return 0;
1520 err_alloc_queues:
1521         i40evf_free_q_vectors(adapter);
1522 err_alloc_q_vectors:
1523         i40evf_reset_interrupt_capability(adapter);
1524 err_set_interrupt:
1525         return err;
1526 }
1527
1528 /**
1529  * i40evf_free_rss - Free memory used by RSS structs
1530  * @adapter: board private structure
1531  **/
1532 static void i40evf_free_rss(struct i40evf_adapter *adapter)
1533 {
1534         kfree(adapter->rss_key);
1535         adapter->rss_key = NULL;
1536
1537         kfree(adapter->rss_lut);
1538         adapter->rss_lut = NULL;
1539 }
1540
1541 /**
1542  * i40evf_watchdog_timer - Periodic call-back timer
1543  * @data: pointer to adapter disguised as unsigned long
1544  **/
1545 static void i40evf_watchdog_timer(unsigned long data)
1546 {
1547         struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
1548
1549         schedule_work(&adapter->watchdog_task);
1550         /* timer will be rescheduled in watchdog task */
1551 }
1552
/**
 * i40evf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 *
 * Central state machine for the VF: recovers from PF communication
 * failures, detects hardware resets, and services the aq_required flag
 * bits.  At most one admin-queue/virtchnl request is issued per
 * invocation; the task then reschedules itself via the watchdog timer.
 **/
static void i40evf_watchdog_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      watchdog_task);
	struct i40e_hw *hw = &adapter->hw;
	u32 reg_val;

	/* if another task holds the critical section, just reschedule */
	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
		/* poll the VF reset status register for recovery */
		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == I40E_VFR_VFACTIVE) ||
		    (reg_val == I40E_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __I40EVF_STARTUP;
			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* nothing to service before init completes or during a reset */
	if ((adapter->state < __I40EVF_DOWN) ||
	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset: the ARQ enable bit is cleared by a VF reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		/* a request is still outstanding; re-ping the PF if the
		 * admin send queue has stalled
		 */
		if (!i40evf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			i40evf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		i40evf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	/* service one aq_required flag per tick, in priority order;
	 * each handler clears its own flag and sets current_op
	 */
	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
		i40evf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
		i40evf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		i40evf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
		i40evf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
		i40evf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
		i40evf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
		i40evf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		i40evf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		i40evf_init_rss(adapter);
		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
		i40evf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
		i40evf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
		i40evf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
		i40evf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
		i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
				       I40E_FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
		i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	/* NOTE(review): releases promiscuous only when BOTH release flags
	 * are set (&&); confirm this is the intended pairing of the
	 * PROMISC/ALLMULTI release requests.
	 */
	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		i40evf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}
	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __I40EVF_RUNNING)
		i40evf_request_stats(adapter);
watchdog_done:
	/* re-enable queue interrupts and fire a SW interrupt so any
	 * pending work in the hot path gets picked up
	 */
	if (adapter->state == __I40EVF_RUNNING) {
		i40evf_irq_enable_queues(adapter, ~0);
		i40evf_fire_sw_int(adapter, 0xFF);
	} else {
		i40evf_fire_sw_int(adapter, 0x1);
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __I40EVF_REMOVE)
		return;
	/* poll faster while AQ work is outstanding */
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}
1728
/**
 * i40evf_disable_vf - disable the VF after a failed reset
 * @adapter: board private structure
 *
 * Called when the reset task gives up waiting for the hardware to come
 * back. Quiesces the data path if it was running, deletes all MAC and
 * VLAN filters, frees IRQs, queues and vectors, shuts down the admin
 * queue, and leaves the adapter DOWN with PF communications marked as
 * failed.
 **/
static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;

	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;

	/* If the interface was up, stop traffic and release the data-path
	 * resources before dismantling the rest of the adapter.
	 */
	if (netif_running(adapter->netdev)) {
		set_bit(__I40E_DOWN, &adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
		i40evf_irq_disable(adapter);
		i40evf_free_traffic_irqs(adapter);
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
	}

	/* Delete all of the filters, both MAC and VLAN. */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	i40evf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	/* NOTE(review): this releases __I40EVF_IN_CRITICAL_TASK on behalf of
	 * whoever scheduled the reset task - confirm that path holds the bit.
	 */
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
	adapter->state = __I40EVF_DOWN;
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
1771
1772 #define I40EVF_RESET_WAIT_MS 10
1773 #define I40EVF_RESET_WAIT_COUNT 500
/**
 * i40evf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void i40evf_reset_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      reset_task);
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;

	/* Take the client-task bit so the client task cannot run while we
	 * tear the adapter down.
	 */
	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		/* Drop all queued client requests and tell the client the
		 * VSI is going away before we start the reset.
		 */
		adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
				    I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
				    I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		i40evf_notify_client_close(&adapter->vsi, true);
	}
	i40evf_misc_irq_disable(adapter);
	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		i40evf_shutdown_adminq(hw);
		i40evf_init_adminq(hw);
		i40evf_request_reset(adapter);
	}
	adapter->flags |= I40EVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(I40EVF_RESET_WAIT_MS);

		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == I40E_VFR_VFACTIVE)
			break;
	}

	/* the PF reset may have cleared bus mastering for this function */
	pci_set_master(adapter->pdev);

	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		i40evf_disable_vf(adapter);
		clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* Quiesce the data path before rebuilding it. */
	if (netif_running(adapter->netdev)) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
	}
	i40evf_irq_disable(adapter);

	adapter->state = __I40EVF_RESETTING;
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_all_tx_resources(adapter);

	/* kill and reinit the admin queue */
	i40evf_shutdown_adminq(hw);
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
	err = i40evf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);

	/* Ask the watchdog to re-negotiate configuration and vector
	 * mapping with the PF.
	 */
	adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	/* NOTE(review): __I40EVF_IN_CRITICAL_TASK is assumed to be held by
	 * whoever scheduled this task - confirm against the callers.
	 */
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
	i40evf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* If the interface was up, rebuild and restart the data path. */
	if (netif_running(adapter->netdev)) {
		/* allocate transmit descriptors */
		err = i40evf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = i40evf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		i40evf_configure(adapter);

		i40evf_up_complete(adapter);

		i40evf_irq_enable(adapter, true);
	} else {
		adapter->state = __I40EVF_DOWN;
	}

	return;
reset_err:
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(adapter->netdev);
}
1921
/**
 * i40evf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 *
 * Drains the admin receive queue, dispatching each virtchnl message to
 * i40evf_virtchnl_completion(), then checks the ARQ/ASQ length registers
 * for error indications and clears any it finds. Always re-enables the
 * admin queue interrupt cause before returning.
 **/
static void i40evf_adminq_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter =
		container_of(work, struct i40evf_adapter, adminq_task);
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	struct i40e_virtchnl_msg *v_msg;
	i40e_status ret;
	u32 val, oldval;
	u16 pending;

	/* No point touching the AQ if PF communications are known dead. */
	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	/* Drain the ARQ, processing one virtchnl message per iteration. */
	v_msg = (struct i40e_virtchnl_msg *)&event.desc;
	do {
		ret = i40evf_clean_arq_element(hw, &event, &pending);
		if (ret || !v_msg->v_opcode)
			break; /* No event to process or error cleaning ARQ */

		i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
					   v_msg->v_retval, event.msg_buf,
					   event.msg_len);
		/* Re-zero the buffer so the next message starts clean. */
		if (pending != 0)
			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	/* Skip error-register checks while a reset is pending/in progress;
	 * the registers are not reliable then.
	 */
	if ((adapter->flags &
	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __I40EVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	/* Write back only if we actually cleared an error bit. */
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	i40evf_misc_irq_enable(adapter);
}
2006
2007 /**
2008  * i40evf_client_task - worker thread to perform client work
2009  * @work: pointer to work_struct containing our data
2010  *
2011  * This task handles client interactions. Because client calls can be
2012  * reentrant, we can't handle them in the watchdog.
2013  **/
2014 static void i40evf_client_task(struct work_struct *work)
2015 {
2016         struct i40evf_adapter *adapter =
2017                 container_of(work, struct i40evf_adapter, client_task.work);
2018
2019         /* If we can't get the client bit, just give up. We'll be rescheduled
2020          * later.
2021          */
2022
2023         if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2024                 return;
2025
2026         if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2027                 i40evf_client_subtask(adapter);
2028                 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2029                 goto out;
2030         }
2031         if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2032                 i40evf_notify_client_close(&adapter->vsi, false);
2033                 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2034                 goto out;
2035         }
2036         if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2037                 i40evf_notify_client_open(&adapter->vsi);
2038                 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2039                 goto out;
2040         }
2041         if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2042                 i40evf_notify_client_l2_params(&adapter->vsi);
2043                 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2044         }
2045 out:
2046         clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2047 }
2048
2049 /**
2050  * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2051  * @adapter: board private structure
2052  *
2053  * Free all transmit software resources
2054  **/
2055 void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
2056 {
2057         int i;
2058
2059         if (!adapter->tx_rings)
2060                 return;
2061
2062         for (i = 0; i < adapter->num_active_queues; i++)
2063                 if (adapter->tx_rings[i].desc)
2064                         i40evf_free_tx_resources(&adapter->tx_rings[i]);
2065 }
2066
2067 /**
2068  * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2069  * @adapter: board private structure
2070  *
2071  * If this function returns with an error, then it's possible one or
2072  * more of the rings is populated (while the rest are not).  It is the
2073  * callers duty to clean those orphaned rings.
2074  *
2075  * Return 0 on success, negative on failure
2076  **/
2077 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2078 {
2079         int i, err = 0;
2080
2081         for (i = 0; i < adapter->num_active_queues; i++) {
2082                 adapter->tx_rings[i].count = adapter->tx_desc_count;
2083                 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
2084                 if (!err)
2085                         continue;
2086                 dev_err(&adapter->pdev->dev,
2087                         "Allocation for Tx Queue %u failed\n", i);
2088                 break;
2089         }
2090
2091         return err;
2092 }
2093
2094 /**
2095  * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2096  * @adapter: board private structure
2097  *
2098  * If this function returns with an error, then it's possible one or
2099  * more of the rings is populated (while the rest are not).  It is the
2100  * callers duty to clean those orphaned rings.
2101  *
2102  * Return 0 on success, negative on failure
2103  **/
2104 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2105 {
2106         int i, err = 0;
2107
2108         for (i = 0; i < adapter->num_active_queues; i++) {
2109                 adapter->rx_rings[i].count = adapter->rx_desc_count;
2110                 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
2111                 if (!err)
2112                         continue;
2113                 dev_err(&adapter->pdev->dev,
2114                         "Allocation for Rx Queue %u failed\n", i);
2115                 break;
2116         }
2117         return err;
2118 }
2119
2120 /**
2121  * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2122  * @adapter: board private structure
2123  *
2124  * Free all receive software resources
2125  **/
2126 void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
2127 {
2128         int i;
2129
2130         if (!adapter->rx_rings)
2131                 return;
2132
2133         for (i = 0; i < adapter->num_active_queues; i++)
2134                 if (adapter->rx_rings[i].desc)
2135                         i40evf_free_rx_resources(&adapter->rx_rings[i]);
2136 }
2137
2138 /**
2139  * i40evf_open - Called when a network interface is made active
2140  * @netdev: network interface device structure
2141  *
2142  * Returns 0 on success, negative value on failure
2143  *
2144  * The open entry point is called when a network interface is made
2145  * active by the system (IFF_UP).  At this point all resources needed
2146  * for transmit and receive operations are allocated, the interrupt
2147  * handler is registered with the OS, the watchdog timer is started,
2148  * and the stack is notified that the interface is ready.
2149  **/
2150 static int i40evf_open(struct net_device *netdev)
2151 {
2152         struct i40evf_adapter *adapter = netdev_priv(netdev);
2153         int err;
2154
2155         if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2156                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2157                 return -EIO;
2158         }
2159
2160         if (adapter->state != __I40EVF_DOWN)
2161                 return -EBUSY;
2162
2163         /* allocate transmit descriptors */
2164         err = i40evf_setup_all_tx_resources(adapter);
2165         if (err)
2166                 goto err_setup_tx;
2167
2168         /* allocate receive descriptors */
2169         err = i40evf_setup_all_rx_resources(adapter);
2170         if (err)
2171                 goto err_setup_rx;
2172
2173         /* clear any pending interrupts, may auto mask */
2174         err = i40evf_request_traffic_irqs(adapter, netdev->name);
2175         if (err)
2176                 goto err_req_irq;
2177
2178         i40evf_add_filter(adapter, adapter->hw.mac.addr);
2179         i40evf_configure(adapter);
2180
2181         i40evf_up_complete(adapter);
2182
2183         i40evf_irq_enable(adapter, true);
2184
2185         return 0;
2186
2187 err_req_irq:
2188         i40evf_down(adapter);
2189         i40evf_free_traffic_irqs(adapter);
2190 err_setup_rx:
2191         i40evf_free_all_rx_resources(adapter);
2192 err_setup_tx:
2193         i40evf_free_all_tx_resources(adapter);
2194
2195         return err;
2196 }
2197
2198 /**
2199  * i40evf_close - Disables a network interface
2200  * @netdev: network interface device structure
2201  *
2202  * Returns 0, this is not allowed to fail
2203  *
2204  * The close entry point is called when an interface is de-activated
2205  * by the OS.  The hardware is still under the drivers control, but
2206  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2207  * are freed, along with all transmit and receive resources.
2208  **/
2209 static int i40evf_close(struct net_device *netdev)
2210 {
2211         struct i40evf_adapter *adapter = netdev_priv(netdev);
2212
2213         if (adapter->state <= __I40EVF_DOWN_PENDING)
2214                 return 0;
2215
2216
2217         set_bit(__I40E_DOWN, &adapter->vsi.state);
2218         if (CLIENT_ENABLED(adapter))
2219                 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2220
2221         i40evf_down(adapter);
2222         adapter->state = __I40EVF_DOWN_PENDING;
2223         i40evf_free_traffic_irqs(adapter);
2224
2225         /* We explicitly don't free resources here because the hardware is
2226          * still active and can DMA into memory. Resources are cleared in
2227          * i40evf_virtchnl_completion() after we get confirmation from the PF
2228          * driver that the rings have been stopped.
2229          */
2230         return 0;
2231 }
2232
2233 /**
2234  * i40evf_get_stats - Get System Network Statistics
2235  * @netdev: network interface device structure
2236  *
2237  * Returns the address of the device statistics structure.
2238  * The statistics are actually updated from the timer callback.
2239  **/
2240 static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
2241 {
2242         struct i40evf_adapter *adapter = netdev_priv(netdev);
2243
2244         /* only return the current stats */
2245         return &adapter->net_stats;
2246 }
2247
2248 /**
2249  * i40evf_change_mtu - Change the Maximum Transfer Unit
2250  * @netdev: network interface device structure
2251  * @new_mtu: new value for maximum frame size
2252  *
2253  * Returns 0 on success, negative on failure
2254  **/
2255 static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2256 {
2257         struct i40evf_adapter *adapter = netdev_priv(netdev);
2258
2259         netdev->mtu = new_mtu;
2260         if (CLIENT_ENABLED(adapter)) {
2261                 i40evf_notify_client_l2_params(&adapter->vsi);
2262                 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2263         }
2264         adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
2265         schedule_work(&adapter->reset_task);
2266
2267         return 0;
2268 }
2269
2270 /**
2271  * i40evf_features_check - Validate encapsulated packet conforms to limits
2272  * @skb: skb buff
2273  * @netdev: This physical port's netdev
2274  * @features: Offload features that the stack believes apply
2275  **/
2276 static netdev_features_t i40evf_features_check(struct sk_buff *skb,
2277                                                struct net_device *dev,
2278                                                netdev_features_t features)
2279 {
2280         size_t len;
2281
2282         /* No point in doing any of this if neither checksum nor GSO are
2283          * being requested for this frame.  We can rule out both by just
2284          * checking for CHECKSUM_PARTIAL
2285          */
2286         if (skb->ip_summed != CHECKSUM_PARTIAL)
2287                 return features;
2288
2289         /* We cannot support GSO if the MSS is going to be less than
2290          * 64 bytes.  If it is then we need to drop support for GSO.
2291          */
2292         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
2293                 features &= ~NETIF_F_GSO_MASK;
2294
2295         /* MACLEN can support at most 63 words */
2296         len = skb_network_header(skb) - skb->data;
2297         if (len & ~(63 * 2))
2298                 goto out_err;
2299
2300         /* IPLEN and EIPLEN can support at most 127 dwords */
2301         len = skb_transport_header(skb) - skb_network_header(skb);
2302         if (len & ~(127 * 4))
2303                 goto out_err;
2304
2305         if (skb->encapsulation) {
2306                 /* L4TUNLEN can support 127 words */
2307                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
2308                 if (len & ~(127 * 2))
2309                         goto out_err;
2310
2311                 /* IPLEN can support at most 127 dwords */
2312                 len = skb_inner_transport_header(skb) -
2313                       skb_inner_network_header(skb);
2314                 if (len & ~(127 * 4))
2315                         goto out_err;
2316         }
2317
2318         /* No need to validate L4LEN as TCP is the only protocol with a
2319          * a flexible value and we support all possible values supported
2320          * by TCP, which is at most 15 dwords
2321          */
2322
2323         return features;
2324 out_err:
2325         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2326 }
2327
2328 #define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
2329                               NETIF_F_HW_VLAN_CTAG_RX |\
2330                               NETIF_F_HW_VLAN_CTAG_FILTER)
2331
2332 /**
2333  * i40evf_fix_features - fix up the netdev feature bits
2334  * @netdev: our net device
2335  * @features: desired feature bits
2336  *
2337  * Returns fixed-up features bits
2338  **/
2339 static netdev_features_t i40evf_fix_features(struct net_device *netdev,
2340                                              netdev_features_t features)
2341 {
2342         struct i40evf_adapter *adapter = netdev_priv(netdev);
2343
2344         features &= ~I40EVF_VLAN_FEATURES;
2345         if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
2346                 features |= I40EVF_VLAN_FEATURES;
2347         return features;
2348 }
2349
/* Net device operations table handed to the network stack at register
 * time; maps stack callbacks onto this driver's entry points.
 */
static const struct net_device_ops i40evf_netdev_ops = {
	.ndo_open               = i40evf_open,
	.ndo_stop               = i40evf_close,
	.ndo_start_xmit         = i40evf_xmit_frame,
	.ndo_get_stats          = i40evf_get_stats,
	.ndo_set_rx_mode        = i40evf_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = i40evf_set_mac,
	.ndo_change_mtu         = i40evf_change_mtu,
	.ndo_tx_timeout         = i40evf_tx_timeout,
	.ndo_vlan_rx_add_vid    = i40evf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = i40evf_vlan_rx_kill_vid,
	.ndo_features_check     = i40evf_features_check,
	.ndo_fix_features       = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = i40evf_netpoll,
#endif
};
2368
2369 /**
2370  * i40evf_check_reset_complete - check that VF reset is complete
2371  * @hw: pointer to hw struct
2372  *
2373  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2374  **/
2375 static int i40evf_check_reset_complete(struct i40e_hw *hw)
2376 {
2377         u32 rstat;
2378         int i;
2379
2380         for (i = 0; i < 100; i++) {
2381                 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
2382                             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2383                 if ((rstat == I40E_VFR_VFACTIVE) ||
2384                     (rstat == I40E_VFR_COMPLETED))
2385                         return 0;
2386                 usleep_range(10, 20);
2387         }
2388         return -EBUSY;
2389 }
2390
/**
 * i40evf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 *
 * Returns 0 on success, -ENODEV if no SRIOV LAN VSI was reported by the PF.
 **/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct i40e_vsi *vsi = &adapter->vsi;
	int i;

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vfres->num_vsis; i++) {
		if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			adapter->vsi_res = &vfres->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	/* Offloads the hardware supports for encapsulated traffic. */
	netdev->hw_enc_features |= NETIF_F_SG                   |
				   NETIF_F_IP_CSUM              |
				   NETIF_F_IPV6_CSUM            |
				   NETIF_F_HIGHDMA              |
				   NETIF_F_SOFT_FEATURES        |
				   NETIF_F_TSO                  |
				   NETIF_F_TSO_ECN              |
				   NETIF_F_TSO6                 |
				   NETIF_F_GSO_GRE              |
				   NETIF_F_GSO_GRE_CSUM         |
				   NETIF_F_GSO_IPXIP4           |
				   NETIF_F_GSO_IPXIP6           |
				   NETIF_F_GSO_UDP_TUNNEL       |
				   NETIF_F_GSO_UDP_TUNNEL_CSUM  |
				   NETIF_F_GSO_PARTIAL          |
				   NETIF_F_SCTP_CRC             |
				   NETIF_F_RXHASH               |
				   NETIF_F_RXCSUM               |
				   0;

	/* Without outer UDP checksum capability, UDP tunnel checksum can
	 * only be offered via GSO_PARTIAL.
	 */
	if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	/* record features VLANs can make use of */
	netdev->vlan_features |= netdev->hw_enc_features |
				 NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	netdev->hw_features |= netdev->hw_enc_features;

	netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* disable VLAN features if not supported */
	if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
		netdev->features ^= I40EVF_VLAN_FEATURES;

	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	/* RSS key/LUT sizes come from the PF when it does RSS on our
	 * behalf; otherwise use the VF-local register array sizes.
	 */
	if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = vfres->rss_key_size;
		adapter->rss_lut_size = vfres->rss_lut_size;
	} else {
		adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}
2473
/**
 * i40evf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe. Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * responses back from the PF. Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init. Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
static void i40evf_init_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      init_task.work);
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err, bufsz;

	/* Init state machine: each state sends one request to the PF, then
	 * reschedules itself via the "restart" label so the reply can be
	 * picked up on the next pass without busy-waiting.
	 */
	switch (adapter->state) {
	case __I40EVF_STARTUP:
		/* driver loaded, probe complete */
		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
		err = i40e_set_mac_type(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
				err);
			goto err;
		}
		err = i40evf_check_reset_complete(hw);
		if (err) {
			dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
				 err);
			goto err;
		}
		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
		hw->aq.num_asq_entries = I40EVF_AQ_LEN;
		hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
		hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;

		err = i40evf_init_adminq(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
				err);
			goto err;
		}
		err = i40evf_send_api_ver(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
			i40evf_shutdown_adminq(hw);
			goto err;
		}
		adapter->state = __I40EVF_INIT_VERSION_CHECK;
		goto restart;
	case __I40EVF_INIT_VERSION_CHECK:
		if (!i40evf_asq_done(hw)) {
			/* AQ is dead; start over from scratch next time. */
			dev_err(&pdev->dev, "Admin queue command never completed\n");
			i40evf_shutdown_adminq(hw);
			adapter->state = __I40EVF_STARTUP;
			goto err;
		}

		/* aq msg sent, awaiting reply */
		err = i40evf_verify_api_ver(adapter);
		if (err) {
			/* NO_WORK means no reply yet: resend the version
			 * request and let the error path reschedule us.
			 */
			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
				err = i40evf_send_api_ver(adapter);
			else
				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
					adapter->pf_version.major,
					adapter->pf_version.minor,
					I40E_VIRTCHNL_VERSION_MAJOR,
					I40E_VIRTCHNL_VERSION_MINOR);
			goto err;
		}
		err = i40evf_send_vf_config_msg(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
				err);
			goto err;
		}
		adapter->state = __I40EVF_INIT_GET_RESOURCES;
		goto restart;
	case __I40EVF_INIT_GET_RESOURCES:
		/* aq msg sent, awaiting reply */
		if (!adapter->vf_res) {
			/* Worst-case buffer: resource header plus the maximum
			 * number of VSIs a VF can own.
			 */
			bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
				(I40E_MAX_VF_VSI *
				 sizeof(struct i40e_virtchnl_vsi_resource));
			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
			if (!adapter->vf_res)
				goto err;
		}
		err = i40evf_get_vf_config(adapter);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			err = i40evf_send_vf_config_msg(adapter);
			goto err;
		} else if (err == I40E_ERR_PARAM) {
			/* We only get ERR_PARAM if the device is in a very bad
			 * state or if we've been disabled for previous bad
			 * behavior. Either way, we're done now.
			 */
			i40evf_shutdown_adminq(hw);
			dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
			return;
		}
		if (err) {
			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
				err);
			goto err_alloc;
		}
		adapter->state = __I40EVF_INIT_SW;
		break;
	default:
		goto err_alloc;
	}

	/* PF negotiation is complete; finish software-side initialization. */
	if (hw->mac.type == I40E_MAC_X722_VF)
		adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;

	if (i40evf_process_config(adapter))
		goto err_alloc;
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &i40evf_netdev_ops;
	i40evf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		/* PF did not assign a usable MAC; generate a random one. */
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &i40evf_watchdog_timer;
	adapter->watchdog_timer.data = (unsigned long)adapter;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	/* One queue pair per online CPU, bounded by what the PF granted. */
	adapter->num_active_queues = min_t(int,
					   adapter->vsi_res->num_queue_pairs,
					   (int)(num_online_cpus()));
	adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
	adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
	err = i40evf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	i40evf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_offload_flags &
	    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;

	err = i40evf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!adapter->netdev_registered) {
		err = register_netdev(netdev);
		if (err)
			goto err_register;
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		/* Client (RDMA) registration failure is non-fatal. */
		err = i40evf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}

	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __I40EVF_DOWN;
	set_bit(__I40E_DOWN, &adapter->vsi.state);
	i40evf_misc_irq_enable(adapter);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut)
		goto err_mem;

	if (RSS_AQ(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
	} else {
		i40evf_init_rss(adapter);
	}
	return;
restart:
	/* Waiting on an AQ reply from the PF; poll again shortly. */
	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
	return;
err_mem:
	i40evf_free_rss(adapter);
err_register:
	i40evf_free_misc_irq(adapter);
err_sw_init:
	i40evf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	/* Things went into the weeds, so try again later */
	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
		i40evf_shutdown_adminq(hw);
		adapter->state = __I40EVF_STARTUP;
		schedule_delayed_work(&adapter->init_task, HZ * 5);
		return;
	}
	schedule_delayed_work(&adapter->init_task, HZ);
}
2708
2709 /**
2710  * i40evf_shutdown - Shutdown the device in preparation for a reboot
2711  * @pdev: pci device structure
2712  **/
2713 static void i40evf_shutdown(struct pci_dev *pdev)
2714 {
2715         struct net_device *netdev = pci_get_drvdata(pdev);
2716         struct i40evf_adapter *adapter = netdev_priv(netdev);
2717
2718         netif_device_detach(netdev);
2719
2720         if (netif_running(netdev))
2721                 i40evf_close(netdev);
2722
2723         /* Prevent the watchdog from running. */
2724         adapter->state = __I40EVF_REMOVE;
2725         adapter->aq_required = 0;
2726
2727 #ifdef CONFIG_PM
2728         pci_save_state(pdev);
2729
2730 #endif
2731         pci_disable_device(pdev);
2732 }
2733
2734 /**
2735  * i40evf_probe - Device Initialization Routine
2736  * @pdev: PCI device information struct
2737  * @ent: entry in i40evf_pci_tbl
2738  *
2739  * Returns 0 on success, negative on failure
2740  *
2741  * i40evf_probe initializes an adapter identified by a pci_dev structure.
2742  * The OS initialization, configuring of the adapter private structure,
2743  * and a hardware reset occur.
2744  **/
2745 static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2746 {
2747         struct net_device *netdev;
2748         struct i40evf_adapter *adapter = NULL;
2749         struct i40e_hw *hw = NULL;
2750         int err;
2751
2752         err = pci_enable_device(pdev);
2753         if (err)
2754                 return err;
2755
2756         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2757         if (err) {
2758                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2759                 if (err) {
2760                         dev_err(&pdev->dev,
2761                                 "DMA configuration failed: 0x%x\n", err);
2762                         goto err_dma;
2763                 }
2764         }
2765
2766         err = pci_request_regions(pdev, i40evf_driver_name);
2767         if (err) {
2768                 dev_err(&pdev->dev,
2769                         "pci_request_regions failed 0x%x\n", err);
2770                 goto err_pci_reg;
2771         }
2772
2773         pci_enable_pcie_error_reporting(pdev);
2774
2775         pci_set_master(pdev);
2776
2777         netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
2778         if (!netdev) {
2779                 err = -ENOMEM;
2780                 goto err_alloc_etherdev;
2781         }
2782
2783         SET_NETDEV_DEV(netdev, &pdev->dev);
2784
2785         pci_set_drvdata(pdev, netdev);
2786         adapter = netdev_priv(netdev);
2787
2788         adapter->netdev = netdev;
2789         adapter->pdev = pdev;
2790
2791         hw = &adapter->hw;
2792         hw->back = adapter;
2793
2794         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
2795         adapter->state = __I40EVF_STARTUP;
2796
2797         /* Call save state here because it relies on the adapter struct. */
2798         pci_save_state(pdev);
2799
2800         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
2801                               pci_resource_len(pdev, 0));
2802         if (!hw->hw_addr) {
2803                 err = -EIO;
2804                 goto err_ioremap;
2805         }
2806         hw->vendor_id = pdev->vendor;
2807         hw->device_id = pdev->device;
2808         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2809         hw->subsystem_vendor_id = pdev->subsystem_vendor;
2810         hw->subsystem_device_id = pdev->subsystem_device;
2811         hw->bus.device = PCI_SLOT(pdev->devfn);
2812         hw->bus.func = PCI_FUNC(pdev->devfn);
2813         hw->bus.bus_id = pdev->bus->number;
2814
2815         /* set up the locks for the AQ, do this only once in probe
2816          * and destroy them only once in remove
2817          */
2818         mutex_init(&hw->aq.asq_mutex);
2819         mutex_init(&hw->aq.arq_mutex);
2820
2821         INIT_LIST_HEAD(&adapter->mac_filter_list);
2822         INIT_LIST_HEAD(&adapter->vlan_filter_list);
2823
2824         INIT_WORK(&adapter->reset_task, i40evf_reset_task);
2825         INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
2826         INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
2827         INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
2828         INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
2829         schedule_delayed_work(&adapter->init_task,
2830                               msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
2831
2832         return 0;
2833
2834 err_ioremap:
2835         free_netdev(netdev);
2836 err_alloc_etherdev:
2837         pci_release_regions(pdev);
2838 err_pci_reg:
2839 err_dma:
2840         pci_disable_device(pdev);
2841         return err;
2842 }
2843
2844 #ifdef CONFIG_PM
2845 /**
2846  * i40evf_suspend - Power management suspend routine
2847  * @pdev: PCI device information struct
2848  * @state: unused
2849  *
2850  * Called when the system (VM) is entering sleep/suspend.
2851  **/
2852 static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
2853 {
2854         struct net_device *netdev = pci_get_drvdata(pdev);
2855         struct i40evf_adapter *adapter = netdev_priv(netdev);
2856         int retval = 0;
2857
2858         netif_device_detach(netdev);
2859
2860         if (netif_running(netdev)) {
2861                 rtnl_lock();
2862                 i40evf_down(adapter);
2863                 rtnl_unlock();
2864         }
2865         i40evf_free_misc_irq(adapter);
2866         i40evf_reset_interrupt_capability(adapter);
2867
2868         retval = pci_save_state(pdev);
2869         if (retval)
2870                 return retval;
2871
2872         pci_disable_device(pdev);
2873
2874         return 0;
2875 }
2876
2877 /**
2878  * i40evf_resume - Power management resume routine
2879  * @pdev: PCI device information struct
2880  *
2881  * Called when the system (VM) is resumed from sleep/suspend.
2882  **/
2883 static int i40evf_resume(struct pci_dev *pdev)
2884 {
2885         struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
2886         struct net_device *netdev = adapter->netdev;
2887         u32 err;
2888
2889         pci_set_power_state(pdev, PCI_D0);
2890         pci_restore_state(pdev);
2891         /* pci_restore_state clears dev->state_saved so call
2892          * pci_save_state to restore it.
2893          */
2894         pci_save_state(pdev);
2895
2896         err = pci_enable_device_mem(pdev);
2897         if (err) {
2898                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
2899                 return err;
2900         }
2901         pci_set_master(pdev);
2902
2903         rtnl_lock();
2904         err = i40evf_set_interrupt_capability(adapter);
2905         if (err) {
2906                 rtnl_unlock();
2907                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
2908                 return err;
2909         }
2910         err = i40evf_request_misc_irq(adapter);
2911         rtnl_unlock();
2912         if (err) {
2913                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
2914                 return err;
2915         }
2916
2917         schedule_work(&adapter->reset_task);
2918
2919         netif_device_attach(netdev);
2920
2921         return err;
2922 }
2923
2924 #endif /* CONFIG_PM */
2925 /**
2926  * i40evf_remove - Device Removal Routine
2927  * @pdev: PCI device information struct
2928  *
2929  * i40evf_remove is called by the PCI subsystem to alert the driver
2930  * that it should release a PCI device.  The could be caused by a
2931  * Hot-Plug event, or because the driver is going to be removed from
2932  * memory.
2933  **/
2934 static void i40evf_remove(struct pci_dev *pdev)
2935 {
2936         struct net_device *netdev = pci_get_drvdata(pdev);
2937         struct i40evf_adapter *adapter = netdev_priv(netdev);
2938         struct i40evf_mac_filter *f, *ftmp;
2939         struct i40e_hw *hw = &adapter->hw;
2940         int err;
2941
2942         cancel_delayed_work_sync(&adapter->init_task);
2943         cancel_work_sync(&adapter->reset_task);
2944         cancel_delayed_work_sync(&adapter->client_task);
2945         if (adapter->netdev_registered) {
2946                 unregister_netdev(netdev);
2947                 adapter->netdev_registered = false;
2948         }
2949         if (CLIENT_ALLOWED(adapter)) {
2950                 err = i40evf_lan_del_device(adapter);
2951                 if (err)
2952                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
2953                                  err);
2954         }
2955
2956         /* Shut down all the garbage mashers on the detention level */
2957         adapter->state = __I40EVF_REMOVE;
2958         adapter->aq_required = 0;
2959         i40evf_request_reset(adapter);
2960         msleep(50);
2961         /* If the FW isn't responding, kick it once, but only once. */
2962         if (!i40evf_asq_done(hw)) {
2963                 i40evf_request_reset(adapter);
2964                 msleep(50);
2965         }
2966         i40evf_free_all_tx_resources(adapter);
2967         i40evf_free_all_rx_resources(adapter);
2968         i40evf_misc_irq_disable(adapter);
2969         i40evf_free_misc_irq(adapter);
2970         i40evf_reset_interrupt_capability(adapter);
2971         i40evf_free_q_vectors(adapter);
2972
2973         if (adapter->watchdog_timer.function)
2974                 del_timer_sync(&adapter->watchdog_timer);
2975
2976         flush_scheduled_work();
2977
2978         i40evf_free_rss(adapter);
2979
2980         if (hw->aq.asq.count)
2981                 i40evf_shutdown_adminq(hw);
2982
2983         /* destroy the locks only once, here */
2984         mutex_destroy(&hw->aq.arq_mutex);
2985         mutex_destroy(&hw->aq.asq_mutex);
2986
2987         iounmap(hw->hw_addr);
2988         pci_release_regions(pdev);
2989         i40evf_free_all_tx_resources(adapter);
2990         i40evf_free_all_rx_resources(adapter);
2991         i40evf_free_queues(adapter);
2992         kfree(adapter->vf_res);
2993         /* If we got removed before an up/down sequence, we've got a filter
2994          * hanging out there that we need to get rid of.
2995          */
2996         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2997                 list_del(&f->list);
2998                 kfree(f);
2999         }
3000         list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
3001                 list_del(&f->list);
3002                 kfree(f);
3003         }
3004
3005         free_netdev(netdev);
3006
3007         pci_disable_pcie_error_reporting(pdev);
3008
3009         pci_disable_device(pdev);
3010 }
3011
/* PCI driver glue: binds the i40evf_* entry points to matching VF devices
 * listed in i40evf_pci_tbl.
 */
static struct pci_driver i40evf_driver = {
	.name     = i40evf_driver_name,
	.id_table = i40evf_pci_tbl,
	.probe    = i40evf_probe,
	.remove   = i40evf_remove,
#ifdef CONFIG_PM
	.suspend  = i40evf_suspend,
	.resume   = i40evf_resume,
#endif
	.shutdown = i40evf_shutdown,
};
3023
3024 /**
3025  * i40e_init_module - Driver Registration Routine
3026  *
3027  * i40e_init_module is the first routine called when the driver is
3028  * loaded. All it does is register with the PCI subsystem.
3029  **/
3030 static int __init i40evf_init_module(void)
3031 {
3032         int ret;
3033
3034         pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
3035                 i40evf_driver_version);
3036
3037         pr_info("%s\n", i40evf_copyright);
3038
3039         i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3040                                     i40evf_driver_name);
3041         if (!i40evf_wq) {
3042                 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
3043                 return -ENOMEM;
3044         }
3045         ret = pci_register_driver(&i40evf_driver);
3046         return ret;
3047 }
3048
3049 module_init(i40evf_init_module);
3050
/**
 * i40e_exit_module - Driver Exit Cleanup Routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40evf_exit_module(void)
{
	/* Unregister the PCI driver first, then reap the driver workqueue
	 * allocated in i40evf_init_module.
	 */
	pci_unregister_driver(&i40evf_driver);
	destroy_workqueue(i40evf_wq);
}

module_exit(i40evf_exit_module);

/* i40evf_main.c */