drivers/net/ethernet/intel/iavf/iavf_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8  * be included exactly once across the whole kernel with
9  * CREATE_TRACE_POINTS defined
10  */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22         "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23
24 static const char iavf_copyright[] =
25         "Copyright (c) 2013 - 2018 Intel Corporation.";
26
27 /* iavf_pci_tbl - PCI Device ID Table
28  *
29  * Wildcard entries (PCI_ANY_ID) should come last
30  * Last entry must be all 0s
31  *
32  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33  *   Class, Class Mask, private data (not used) }
34  */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39         {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40         /* required last entry */
41         {0, }
42 };
43
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50
51 static const struct net_device_ops iavf_netdev_ops;
52
53 int iavf_status_to_errno(enum iavf_status status)
54 {
55         switch (status) {
56         case IAVF_SUCCESS:
57                 return 0;
58         case IAVF_ERR_PARAM:
59         case IAVF_ERR_MAC_TYPE:
60         case IAVF_ERR_INVALID_MAC_ADDR:
61         case IAVF_ERR_INVALID_LINK_SETTINGS:
62         case IAVF_ERR_INVALID_PD_ID:
63         case IAVF_ERR_INVALID_QP_ID:
64         case IAVF_ERR_INVALID_CQ_ID:
65         case IAVF_ERR_INVALID_CEQ_ID:
66         case IAVF_ERR_INVALID_AEQ_ID:
67         case IAVF_ERR_INVALID_SIZE:
68         case IAVF_ERR_INVALID_ARP_INDEX:
69         case IAVF_ERR_INVALID_FPM_FUNC_ID:
70         case IAVF_ERR_QP_INVALID_MSG_SIZE:
71         case IAVF_ERR_INVALID_FRAG_COUNT:
72         case IAVF_ERR_INVALID_ALIGNMENT:
73         case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
74         case IAVF_ERR_INVALID_IMM_DATA_SIZE:
75         case IAVF_ERR_INVALID_VF_ID:
76         case IAVF_ERR_INVALID_HMCFN_ID:
77         case IAVF_ERR_INVALID_PBLE_INDEX:
78         case IAVF_ERR_INVALID_SD_INDEX:
79         case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
80         case IAVF_ERR_INVALID_SD_TYPE:
81         case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
82         case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
83         case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
84                 return -EINVAL;
85         case IAVF_ERR_NVM:
86         case IAVF_ERR_NVM_CHECKSUM:
87         case IAVF_ERR_PHY:
88         case IAVF_ERR_CONFIG:
89         case IAVF_ERR_UNKNOWN_PHY:
90         case IAVF_ERR_LINK_SETUP:
91         case IAVF_ERR_ADAPTER_STOPPED:
92         case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
93         case IAVF_ERR_AUTONEG_NOT_COMPLETE:
94         case IAVF_ERR_RESET_FAILED:
95         case IAVF_ERR_BAD_PTR:
96         case IAVF_ERR_SWFW_SYNC:
97         case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
98         case IAVF_ERR_QUEUE_EMPTY:
99         case IAVF_ERR_FLUSHED_QUEUE:
100         case IAVF_ERR_OPCODE_MISMATCH:
101         case IAVF_ERR_CQP_COMPL_ERROR:
102         case IAVF_ERR_BACKING_PAGE_ERROR:
103         case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
104         case IAVF_ERR_MEMCPY_FAILED:
105         case IAVF_ERR_SRQ_ENABLED:
106         case IAVF_ERR_ADMIN_QUEUE_ERROR:
107         case IAVF_ERR_ADMIN_QUEUE_FULL:
108         case IAVF_ERR_BAD_IWARP_CQE:
109         case IAVF_ERR_NVM_BLANK_MODE:
110         case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
111         case IAVF_ERR_DIAG_TEST_FAILED:
112         case IAVF_ERR_FIRMWARE_API_VERSION:
113         case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
114                 return -EIO;
115         case IAVF_ERR_DEVICE_NOT_SUPPORTED:
116                 return -ENODEV;
117         case IAVF_ERR_NO_AVAILABLE_VSI:
118         case IAVF_ERR_RING_FULL:
119                 return -ENOSPC;
120         case IAVF_ERR_NO_MEMORY:
121                 return -ENOMEM;
122         case IAVF_ERR_TIMEOUT:
123         case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
124                 return -ETIMEDOUT;
125         case IAVF_ERR_NOT_IMPLEMENTED:
126         case IAVF_NOT_SUPPORTED:
127                 return -EOPNOTSUPP;
128         case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
129                 return -EALREADY;
130         case IAVF_ERR_NOT_READY:
131                 return -EBUSY;
132         case IAVF_ERR_BUF_TOO_SHORT:
133                 return -EMSGSIZE;
134         }
135
136         return -EIO;
137 }
138
139 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
140 {
141         switch (v_status) {
142         case VIRTCHNL_STATUS_SUCCESS:
143                 return 0;
144         case VIRTCHNL_STATUS_ERR_PARAM:
145         case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
146                 return -EINVAL;
147         case VIRTCHNL_STATUS_ERR_NO_MEMORY:
148                 return -ENOMEM;
149         case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
150         case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
151         case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
152                 return -EIO;
153         case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
154                 return -EOPNOTSUPP;
155         }
156
157         return -EIO;
158 }
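
/*
 * Both translation helpers above collapse device/virtchnl status codes into
 * standard errnos, falling back to -EIO for anything unrecognized.  An
 * illustrative use, roughly the pattern followed when sending virtchnl
 * messages to the PF:
 *
 *        status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
 *        return iavf_status_to_errno(status);
 */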
159
160 /**
161  * iavf_pdev_to_adapter - go from pci_dev to adapter
162  * @pdev: pci_dev pointer
163  */
164 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
165 {
166         return netdev_priv(pci_get_drvdata(pdev));
167 }
168
169 /**
170  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
171  * @hw:   pointer to the HW structure
172  * @mem:  ptr to mem struct to fill out
173  * @size: size of memory requested
174  * @alignment: what to align the allocation to
175  **/
176 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
177                                          struct iavf_dma_mem *mem,
178                                          u64 size, u32 alignment)
179 {
180         struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
181
182         if (!mem)
183                 return IAVF_ERR_PARAM;
184
185         mem->size = ALIGN(size, alignment);
186         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
187                                      (dma_addr_t *)&mem->pa, GFP_KERNEL);
188         if (mem->va)
189                 return 0;
190         else
191                 return IAVF_ERR_NO_MEMORY;
192 }
193
194 /**
195  * iavf_free_dma_mem_d - OS specific memory free for shared code
196  * @hw:   pointer to the HW structure
197  * @mem:  ptr to mem struct to free
198  **/
199 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
200                                      struct iavf_dma_mem *mem)
201 {
202         struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
203
204         if (!mem || !mem->va)
205                 return IAVF_ERR_PARAM;
206         dma_free_coherent(&adapter->pdev->dev, mem->size,
207                           mem->va, (dma_addr_t)mem->pa);
208         return 0;
209 }
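
/*
 * The *_dma_mem_d callbacks above (and the virt_mem counterparts below) back
 * the OS-agnostic shared admin-queue code: descriptor rings and message
 * buffers come from dma_alloc_coherent(), plain bookkeeping structures from
 * kzalloc().  Note that mem->pa is stored through a dma_addr_t cast, so the
 * shared code treats it as an opaque bus address.
 */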
210
211 /**
212  * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
213  * @hw:   pointer to the HW structure
214  * @mem:  ptr to mem struct to fill out
215  * @size: size of memory requested
216  **/
217 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
218                                           struct iavf_virt_mem *mem, u32 size)
219 {
220         if (!mem)
221                 return IAVF_ERR_PARAM;
222
223         mem->size = size;
224         mem->va = kzalloc(size, GFP_KERNEL);
225
226         if (mem->va)
227                 return 0;
228         else
229                 return IAVF_ERR_NO_MEMORY;
230 }
231
232 /**
233  * iavf_free_virt_mem_d - OS specific memory free for shared code
234  * @hw:   pointer to the HW structure
235  * @mem:  ptr to mem struct to free
236  **/
237 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
238                                       struct iavf_virt_mem *mem)
239 {
240         if (!mem)
241                 return IAVF_ERR_PARAM;
242
243         /* it's ok to kfree a NULL pointer */
244         kfree(mem->va);
245
246         return 0;
247 }
248
249 /**
250  * iavf_lock_timeout - try to lock mutex but give up after timeout
251  * @lock: mutex that should be locked
252  * @msecs: timeout in msecs
253  *
254  * Returns 0 on success, negative on failure
255  **/
256 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
257 {
258         unsigned int wait, delay = 10;
259
260         for (wait = 0; wait < msecs; wait += delay) {
261                 if (mutex_trylock(lock))
262                         return 0;
263
264                 msleep(delay);
265         }
266
267         return -1;
268 }
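
/*
 * iavf_lock_timeout() polls with mutex_trylock() every 10 ms and returns -1
 * (not a specific errno) if the lock cannot be taken within @msecs.  An
 * illustrative calling pattern:
 *
 *        if (iavf_lock_timeout(&adapter->crit_lock, 5000))
 *                dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock\n");
 */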
269
270 /**
271  * iavf_schedule_reset - Set the flags and schedule a reset event
272  * @adapter: board private structure
273  **/
274 void iavf_schedule_reset(struct iavf_adapter *adapter)
275 {
276         if (!(adapter->flags &
277               (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
278                 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
279                 queue_work(adapter->wq, &adapter->reset_task);
280         }
281 }
282
283 /**
284  * iavf_schedule_request_stats - Set the flags and schedule statistics request
285  * @adapter: board private structure
286  *
287  * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
288  * request and refresh ethtool stats
289  **/
290 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
291 {
292         adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
293         mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
294 }
295
296 /**
297  * iavf_tx_timeout - Respond to a Tx Hang
298  * @netdev: network interface device structure
299  * @txqueue: queue number that is timing out
300  **/
301 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
302 {
303         struct iavf_adapter *adapter = netdev_priv(netdev);
304
305         adapter->tx_timeout_count++;
306         iavf_schedule_reset(adapter);
307 }
308
309 /**
310  * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
311  * @adapter: board private structure
312  **/
313 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
314 {
315         struct iavf_hw *hw = &adapter->hw;
316
317         if (!adapter->msix_entries)
318                 return;
319
320         wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
321
322         iavf_flush(hw);
323
324         synchronize_irq(adapter->msix_entries[0].vector);
325 }
326
327 /**
328  * iavf_misc_irq_enable - Enable default interrupt generation settings
329  * @adapter: board private structure
330  **/
331 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
332 {
333         struct iavf_hw *hw = &adapter->hw;
334
335         wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
336                                        IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
337         wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
338
339         iavf_flush(hw);
340 }
341
342 /**
343  * iavf_irq_disable - Mask off interrupt generation on the NIC
344  * @adapter: board private structure
345  **/
346 static void iavf_irq_disable(struct iavf_adapter *adapter)
347 {
348         int i;
349         struct iavf_hw *hw = &adapter->hw;
350
351         if (!adapter->msix_entries)
352                 return;
353
354         for (i = 1; i < adapter->num_msix_vectors; i++) {
355                 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
356                 synchronize_irq(adapter->msix_entries[i].vector);
357         }
358         iavf_flush(hw);
359 }
360
361 /**
362  * iavf_irq_enable_queues - Enable interrupt for specified queues
363  * @adapter: board private structure
364  * @mask: bitmap of queues to enable
365  **/
366 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
367 {
368         struct iavf_hw *hw = &adapter->hw;
369         int i;
370
371         for (i = 1; i < adapter->num_msix_vectors; i++) {
372                 if (mask & BIT(i - 1)) {
373                         wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
374                              IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
375                              IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
376                 }
377         }
378 }
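
/*
 * In @mask, bit n corresponds to queue vector n, i.e. MSI-X vector n + 1,
 * since vector 0 is reserved for the admin queue.  iavf_irq_enable() below
 * passes ~0 to enable every traffic vector; enabling only the first one
 * would look like iavf_irq_enable_queues(adapter, BIT(0)).
 */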
379
380 /**
381  * iavf_irq_enable - Enable default interrupt generation settings
382  * @adapter: board private structure
383  * @flush: boolean value whether to run rd32()
384  **/
385 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
386 {
387         struct iavf_hw *hw = &adapter->hw;
388
389         iavf_misc_irq_enable(adapter);
390         iavf_irq_enable_queues(adapter, ~0);
391
392         if (flush)
393                 iavf_flush(hw);
394 }
395
396 /**
397  * iavf_msix_aq - Interrupt handler for vector 0
398  * @irq: interrupt number
399  * @data: pointer to netdev
400  **/
401 static irqreturn_t iavf_msix_aq(int irq, void *data)
402 {
403         struct net_device *netdev = data;
404         struct iavf_adapter *adapter = netdev_priv(netdev);
405         struct iavf_hw *hw = &adapter->hw;
406
407         /* handle non-queue interrupts, these reads clear the registers */
408         rd32(hw, IAVF_VFINT_ICR01);
409         rd32(hw, IAVF_VFINT_ICR0_ENA1);
410
411         if (adapter->state != __IAVF_REMOVE)
412                 /* schedule work on the private workqueue */
413                 queue_work(adapter->wq, &adapter->adminq_task);
414
415         return IRQ_HANDLED;
416 }
417
418 /**
419  * iavf_msix_clean_rings - MSIX mode Interrupt Handler
420  * @irq: interrupt number
421  * @data: pointer to a q_vector
422  **/
423 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
424 {
425         struct iavf_q_vector *q_vector = data;
426
427         if (!q_vector->tx.ring && !q_vector->rx.ring)
428                 return IRQ_HANDLED;
429
430         napi_schedule_irqoff(&q_vector->napi);
431
432         return IRQ_HANDLED;
433 }
434
435 /**
436  * iavf_map_vector_to_rxq - associate irqs with rx queues
437  * @adapter: board private structure
438  * @v_idx: interrupt number
439  * @r_idx: queue number
440  **/
441 static void
442 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
443 {
444         struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
445         struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
446         struct iavf_hw *hw = &adapter->hw;
447
448         rx_ring->q_vector = q_vector;
449         rx_ring->next = q_vector->rx.ring;
450         rx_ring->vsi = &adapter->vsi;
451         q_vector->rx.ring = rx_ring;
452         q_vector->rx.count++;
453         q_vector->rx.next_update = jiffies + 1;
454         q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
455         q_vector->ring_mask |= BIT(r_idx);
456         wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
457              q_vector->rx.current_itr >> 1);
458         q_vector->rx.current_itr = q_vector->rx.target_itr;
459 }
460
461 /**
462  * iavf_map_vector_to_txq - associate irqs with tx queues
463  * @adapter: board private structure
464  * @v_idx: interrupt number
465  * @t_idx: queue number
466  **/
467 static void
468 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
469 {
470         struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
471         struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
472         struct iavf_hw *hw = &adapter->hw;
473
474         tx_ring->q_vector = q_vector;
475         tx_ring->next = q_vector->tx.ring;
476         tx_ring->vsi = &adapter->vsi;
477         q_vector->tx.ring = tx_ring;
478         q_vector->tx.count++;
479         q_vector->tx.next_update = jiffies + 1;
480         q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
481         q_vector->num_ringpairs++;
482         wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
483              q_vector->tx.target_itr >> 1);
484         q_vector->tx.current_itr = q_vector->tx.target_itr;
485 }
486
487 /**
488  * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
489  * @adapter: board private structure to initialize
490  *
491  * This function maps descriptor rings to the queue-specific vectors
492  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
493  * one vector per ring/queue, but on a constrained vector budget, we
494  * group the rings as "efficiently" as possible.  You would add new
495  * mapping configurations in here.
496  **/
497 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
498 {
499         int rings_remaining = adapter->num_active_queues;
500         int ridx = 0, vidx = 0;
501         int q_vectors;
502
503         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
504
505         for (; ridx < rings_remaining; ridx++) {
506                 iavf_map_vector_to_rxq(adapter, vidx, ridx);
507                 iavf_map_vector_to_txq(adapter, vidx, ridx);
508
509                 /* In the case where we have more queues than vectors, continue
510                  * round-robin on vectors until all queues are mapped.
511                  */
512                 if (++vidx >= q_vectors)
513                         vidx = 0;
514         }
515
516         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
517 }
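
/*
 * Worked example of the round-robin mapping above: with 8 active queue pairs
 * and 4 queue vectors, vector 0 services rings 0 and 4, vector 1 rings 1 and
 * 5, and so on.  With at least as many vectors as queues, each vector gets
 * exactly one Tx/Rx ring pair.
 */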
518
519 /**
520  * iavf_irq_affinity_notify - Callback for affinity changes
521  * @notify: context as to what irq was changed
522  * @mask: the new affinity mask
523  *
524  * This is a callback function used by the irq_set_affinity_notifier function
525  * so that we may register to receive changes to the irq affinity masks.
526  **/
527 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
528                                      const cpumask_t *mask)
529 {
530         struct iavf_q_vector *q_vector =
531                 container_of(notify, struct iavf_q_vector, affinity_notify);
532
533         cpumask_copy(&q_vector->affinity_mask, mask);
534 }
535
536 /**
537  * iavf_irq_affinity_release - Callback for affinity notifier release
538  * @ref: internal core kernel usage
539  *
540  * This is a callback function used by the irq_set_affinity_notifier function
541  * to inform the current notification subscriber that they will no longer
542  * receive notifications.
543  **/
544 static void iavf_irq_affinity_release(struct kref *ref) {}
545
546 /**
547  * iavf_request_traffic_irqs - Initialize MSI-X interrupts
548  * @adapter: board private structure
549  * @basename: device basename
550  *
551  * Allocates MSI-X vectors for tx and rx handling, and requests
552  * interrupts from the kernel.
553  **/
554 static int
555 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
556 {
557         unsigned int vector, q_vectors;
558         unsigned int rx_int_idx = 0, tx_int_idx = 0;
559         int irq_num, err;
560         int cpu;
561
562         iavf_irq_disable(adapter);
563         /* Decrement for Other and TCP Timer vectors */
564         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
565
566         for (vector = 0; vector < q_vectors; vector++) {
567                 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
568
569                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
570
571                 if (q_vector->tx.ring && q_vector->rx.ring) {
572                         snprintf(q_vector->name, sizeof(q_vector->name),
573                                  "iavf-%s-TxRx-%u", basename, rx_int_idx++);
574                         tx_int_idx++;
575                 } else if (q_vector->rx.ring) {
576                         snprintf(q_vector->name, sizeof(q_vector->name),
577                                  "iavf-%s-rx-%u", basename, rx_int_idx++);
578                 } else if (q_vector->tx.ring) {
579                         snprintf(q_vector->name, sizeof(q_vector->name),
580                                  "iavf-%s-tx-%u", basename, tx_int_idx++);
581                 } else {
582                         /* skip this unused q_vector */
583                         continue;
584                 }
585                 err = request_irq(irq_num,
586                                   iavf_msix_clean_rings,
587                                   0,
588                                   q_vector->name,
589                                   q_vector);
590                 if (err) {
591                         dev_info(&adapter->pdev->dev,
592                                  "Request_irq failed, error: %d\n", err);
593                         goto free_queue_irqs;
594                 }
595                 /* register for affinity change notifications */
596                 q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
597                 q_vector->affinity_notify.release =
598                                                    iavf_irq_affinity_release;
599                 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
600                 /* Spread the IRQ affinity hints across online CPUs. Note that
601                  * get_cpu_mask returns a mask with a permanent lifetime so
602                  * it's safe to use as a hint for irq_update_affinity_hint.
603                  */
604                 cpu = cpumask_local_spread(q_vector->v_idx, -1);
605                 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
606         }
607
608         return 0;
609
610 free_queue_irqs:
611         while (vector) {
612                 vector--;
613                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
614                 irq_set_affinity_notifier(irq_num, NULL);
615                 irq_update_affinity_hint(irq_num, NULL);
616                 free_irq(irq_num, &adapter->q_vectors[vector]);
617         }
618         return err;
619 }
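
/*
 * The interrupt names built above are what show up in /proc/interrupts,
 * e.g. "iavf-eth0-TxRx-0", "iavf-eth0-TxRx-1", ... for combined queue pairs
 * (assuming the caller passes the netdev name, such as "eth0", as @basename).
 */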
620
621 /**
622  * iavf_request_misc_irq - Initialize MSI-X interrupts
623  * @adapter: board private structure
624  *
625  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
626  * vector is only for the admin queue, and stays active even when the netdev
627  * is closed.
628  **/
629 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
630 {
631         struct net_device *netdev = adapter->netdev;
632         int err;
633
634         snprintf(adapter->misc_vector_name,
635                  sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
636                  dev_name(&adapter->pdev->dev));
637         err = request_irq(adapter->msix_entries[0].vector,
638                           &iavf_msix_aq, 0,
639                           adapter->misc_vector_name, netdev);
640         if (err) {
641                 dev_err(&adapter->pdev->dev,
642                         "request_irq for %s failed: %d\n",
643                         adapter->misc_vector_name, err);
644                 free_irq(adapter->msix_entries[0].vector, netdev);
645         }
646         return err;
647 }
648
649 /**
650  * iavf_free_traffic_irqs - Free MSI-X interrupts
651  * @adapter: board private structure
652  *
653  * Frees all MSI-X vectors other than 0.
654  **/
655 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
656 {
657         int vector, irq_num, q_vectors;
658
659         if (!adapter->msix_entries)
660                 return;
661
662         q_vectors = adapter->num_msix_vectors - NONQ_VECS;
663
664         for (vector = 0; vector < q_vectors; vector++) {
665                 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
666                 irq_set_affinity_notifier(irq_num, NULL);
667                 irq_update_affinity_hint(irq_num, NULL);
668                 free_irq(irq_num, &adapter->q_vectors[vector]);
669         }
670 }
671
672 /**
673  * iavf_free_misc_irq - Free MSI-X miscellaneous vector
674  * @adapter: board private structure
675  *
676  * Frees MSI-X vector 0.
677  **/
678 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
679 {
680         struct net_device *netdev = adapter->netdev;
681
682         if (!adapter->msix_entries)
683                 return;
684
685         free_irq(adapter->msix_entries[0].vector, netdev);
686 }
687
688 /**
689  * iavf_configure_tx - Configure Transmit Unit after Reset
690  * @adapter: board private structure
691  *
692  * Configure the Tx unit of the MAC after a reset.
693  **/
694 static void iavf_configure_tx(struct iavf_adapter *adapter)
695 {
696         struct iavf_hw *hw = &adapter->hw;
697         int i;
698
699         for (i = 0; i < adapter->num_active_queues; i++)
700                 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
701 }
702
703 /**
704  * iavf_configure_rx - Configure Receive Unit after Reset
705  * @adapter: board private structure
706  *
707  * Configure the Rx unit of the MAC after a reset.
708  **/
709 static void iavf_configure_rx(struct iavf_adapter *adapter)
710 {
711         unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
712         struct iavf_hw *hw = &adapter->hw;
713         int i;
714
715         /* Legacy Rx will always default to a 2048 buffer size. */
716 #if (PAGE_SIZE < 8192)
717         if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
718                 struct net_device *netdev = adapter->netdev;
719
720                 /* For jumbo frames on systems with 4K pages we have to use
721                  * an order 1 page, so we might as well increase the size
722                  * of our Rx buffer to make better use of the available space
723                  */
724                 rx_buf_len = IAVF_RXBUFFER_3072;
725
726                 /* We use a 1536 buffer size for configurations with
727                  * standard Ethernet mtu.  On x86 this gives us enough room
728                  * for shared info and 192 bytes of padding.
729                  */
730                 if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
731                     (netdev->mtu <= ETH_DATA_LEN))
732                         rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
733         }
734 #endif
735
736         for (i = 0; i < adapter->num_active_queues; i++) {
737                 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
738                 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
739
740                 if (adapter->flags & IAVF_FLAG_LEGACY_RX)
741                         clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
742                 else
743                         set_ring_build_skb_enabled(&adapter->rx_rings[i]);
744         }
745 }
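
/*
 * Rough summary of the Rx buffer sizing above: legacy Rx, or systems with
 * pages of 8 KiB and larger, always use 2048-byte buffers; on 4 KiB pages
 * with build_skb Rx, a standard 1500-byte MTU gets a 1536-byte buffer (minus
 * NET_IP_ALIGN) so headroom and shared info still fit, while larger MTUs
 * fall back to 3072-byte buffers backed by order-1 pages.
 */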
746
747 /**
748  * iavf_find_vlan - Search filter list for specific vlan filter
749  * @adapter: board private structure
750  * @vlan: vlan tag
751  *
752  * Returns ptr to the filter object or NULL. Must be called while holding the
753  * mac_vlan_list_lock.
754  **/
755 static struct
756 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
757                                  struct iavf_vlan vlan)
758 {
759         struct iavf_vlan_filter *f;
760
761         list_for_each_entry(f, &adapter->vlan_filter_list, list) {
762                 if (f->vlan.vid == vlan.vid &&
763                     f->vlan.tpid == vlan.tpid)
764                         return f;
765         }
766
767         return NULL;
768 }
769
770 /**
771  * iavf_add_vlan - Add a vlan filter to the list
772  * @adapter: board private structure
773  * @vlan: VLAN tag
774  *
775  * Returns ptr to the filter object or NULL when no memory available.
776  **/
777 static struct
778 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
779                                 struct iavf_vlan vlan)
780 {
781         struct iavf_vlan_filter *f = NULL;
782
783         spin_lock_bh(&adapter->mac_vlan_list_lock);
784
785         f = iavf_find_vlan(adapter, vlan);
786         if (!f) {
787                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
788                 if (!f)
789                         goto clearout;
790
791                 f->vlan = vlan;
792
793                 list_add_tail(&f->list, &adapter->vlan_filter_list);
794                 f->add = true;
795                 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
796         }
797
798 clearout:
799         spin_unlock_bh(&adapter->mac_vlan_list_lock);
800         return f;
801 }
802
803 /**
804  * iavf_del_vlan - Remove a vlan filter from the list
805  * @adapter: board private structure
806  * @vlan: VLAN tag
807  **/
808 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
809 {
810         struct iavf_vlan_filter *f;
811
812         spin_lock_bh(&adapter->mac_vlan_list_lock);
813
814         f = iavf_find_vlan(adapter, vlan);
815         if (f) {
816                 f->remove = true;
817                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
818         }
819
820         spin_unlock_bh(&adapter->mac_vlan_list_lock);
821 }
822
823 /**
824  * iavf_restore_filters
825  * @adapter: board private structure
826  *
831  * Restore existing non-MAC filters when the VF netdev comes back up
828  **/
829 static void iavf_restore_filters(struct iavf_adapter *adapter)
830 {
831         u16 vid;
832
833         /* re-add all VLAN filters */
834         for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
835                 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
836
837         for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
838                 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
839 }
840
841 /**
842  * iavf_get_num_vlans_added - get number of VLANs added
843  * @adapter: board private structure
844  */
845 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
846 {
847         return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
848                 bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
849 }
850
851 /**
852  * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
853  * @adapter: board private structure
854  *
855  * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
856  * no limit is imposed, which preserves existing behavior; for
857  * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum number of filters sent by the PF.
858  **/
859 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
860 {
861         /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
862          * never been a limit on the VF driver side
863          */
864         if (VLAN_ALLOWED(adapter))
865                 return VLAN_N_VID;
866         else if (VLAN_V2_ALLOWED(adapter))
867                 return adapter->vlan_v2_caps.filtering.max_filters;
868
869         return 0;
870 }
871
872 /**
873  * iavf_max_vlans_added - check if maximum VLANs allowed already exist
874  * @adapter: board private structure
875  **/
876 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
877 {
878         if (iavf_get_num_vlans_added(adapter) <
879             iavf_get_max_vlans_allowed(adapter))
880                 return false;
881
882         return true;
883 }
884
885 /**
886  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
887  * @netdev: network device struct
888  * @proto: unused protocol data
889  * @vid: VLAN tag
890  **/
891 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
892                                 __always_unused __be16 proto, u16 vid)
893 {
894         struct iavf_adapter *adapter = netdev_priv(netdev);
895
896         if (!VLAN_FILTERING_ALLOWED(adapter))
897                 return -EIO;
898
899         if (iavf_max_vlans_added(adapter)) {
900                 netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
901                            iavf_get_max_vlans_allowed(adapter));
902                 return -EIO;
903         }
904
905         if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
906                 return -ENOMEM;
907
908         return 0;
909 }
910
911 /**
912  * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
913  * @netdev: network device struct
914  * @proto: unused protocol data
915  * @vid: VLAN tag
916  **/
917 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
918                                  __always_unused __be16 proto, u16 vid)
919 {
920         struct iavf_adapter *adapter = netdev_priv(netdev);
921
922         iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
923         if (proto == cpu_to_be16(ETH_P_8021Q))
924                 clear_bit(vid, adapter->vsi.active_cvlans);
925         else
926                 clear_bit(vid, adapter->vsi.active_svlans);
927
928         return 0;
929 }
930
931 /**
932  * iavf_find_filter - Search filter list for specific mac filter
933  * @adapter: board private structure
934  * @macaddr: the MAC address
935  *
936  * Returns ptr to the filter object or NULL. Must be called while holding the
937  * mac_vlan_list_lock.
938  **/
939 static struct
940 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
941                                   const u8 *macaddr)
942 {
943         struct iavf_mac_filter *f;
944
945         if (!macaddr)
946                 return NULL;
947
948         list_for_each_entry(f, &adapter->mac_filter_list, list) {
949                 if (ether_addr_equal(macaddr, f->macaddr))
950                         return f;
951         }
952         return NULL;
953 }
954
955 /**
956  * iavf_add_filter - Add a mac filter to the filter list
957  * @adapter: board private structure
958  * @macaddr: the MAC address
959  *
960  * Returns ptr to the filter object or NULL when no memory available.
961  **/
962 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
963                                         const u8 *macaddr)
964 {
965         struct iavf_mac_filter *f;
966
967         if (!macaddr)
968                 return NULL;
969
970         f = iavf_find_filter(adapter, macaddr);
971         if (!f) {
972                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
973                 if (!f)
974                         return f;
975
976                 ether_addr_copy(f->macaddr, macaddr);
977
978                 list_add_tail(&f->list, &adapter->mac_filter_list);
979                 f->add = true;
980                 f->add_handled = false;
981                 f->is_new_mac = true;
982                 f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
983                 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
984         } else {
985                 f->remove = false;
986         }
987
988         return f;
989 }
990
991 /**
992  * iavf_replace_primary_mac - Replace current primary address
993  * @adapter: board private structure
994  * @new_mac: new MAC address to be applied
995  *
996  * Replace current dev_addr and send request to PF for removal of previous
997  * primary MAC address filter and addition of new primary MAC filter.
998  * Return 0 for success, -ENOMEM for failure.
999  *
1000  * Do not call this with mac_vlan_list_lock!
1001  **/
1002 int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1003                              const u8 *new_mac)
1004 {
1005         struct iavf_hw *hw = &adapter->hw;
1006         struct iavf_mac_filter *f;
1007
1008         spin_lock_bh(&adapter->mac_vlan_list_lock);
1009
1010         list_for_each_entry(f, &adapter->mac_filter_list, list) {
1011                 f->is_primary = false;
1012         }
1013
1014         f = iavf_find_filter(adapter, hw->mac.addr);
1015         if (f) {
1016                 f->remove = true;
1017                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1018         }
1019
1020         f = iavf_add_filter(adapter, new_mac);
1021
1022         if (f) {
1023                 /* Always send the request to add if changing primary MAC
1024                  * even if filter is already present on the list
1025                  */
1026                 f->is_primary = true;
1027                 f->add = true;
1028                 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1029                 ether_addr_copy(hw->mac.addr, new_mac);
1030         }
1031
1032         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1033
1034         /* schedule the watchdog task to immediately process the request */
1035         if (f) {
1036                 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1037                 return 0;
1038         }
1039         return -ENOMEM;
1040 }
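
/*
 * Rough sequence for a primary MAC change: the old hw->mac.addr filter is
 * flagged for removal, the new address is added (or re-flagged) as primary,
 * hw->mac.addr is updated, and the watchdog is kicked so the virtchnl
 * requests go out immediately.  netdev->dev_addr itself is only updated once
 * the PF acknowledges the new filter, which is what iavf_set_mac() below
 * waits for on adapter->vc_waitqueue.
 */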
1041
1042 /**
1043  * iavf_is_mac_set_handled - wait for a response to set MAC from PF
1044  * @netdev: network interface device structure
1045  * @macaddr: MAC address to set
1046  *
1047  * Returns true on success, false on failure
1048  */
1049 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1050                                     const u8 *macaddr)
1051 {
1052         struct iavf_adapter *adapter = netdev_priv(netdev);
1053         struct iavf_mac_filter *f;
1054         bool ret = false;
1055
1056         spin_lock_bh(&adapter->mac_vlan_list_lock);
1057
1058         f = iavf_find_filter(adapter, macaddr);
1059
1060         if (!f || (!f->add && f->add_handled))
1061                 ret = true;
1062
1063         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1064
1065         return ret;
1066 }
1067
1068 /**
1069  * iavf_set_mac - NDO callback to set port MAC address
1070  * @netdev: network interface device structure
1071  * @p: pointer to an address structure
1072  *
1073  * Returns 0 on success, negative on failure
1074  */
1075 static int iavf_set_mac(struct net_device *netdev, void *p)
1076 {
1077         struct iavf_adapter *adapter = netdev_priv(netdev);
1078         struct sockaddr *addr = p;
1079         int ret;
1080
1081         if (!is_valid_ether_addr(addr->sa_data))
1082                 return -EADDRNOTAVAIL;
1083
1084         ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1085
1086         if (ret)
1087                 return ret;
1088
1089         ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1090                                                iavf_is_mac_set_handled(netdev, addr->sa_data),
1091                                                msecs_to_jiffies(2500));
1092
1093         /* If ret < 0, the wait was interrupted.
1094          * If ret == 0, we timed out waiting for a response from the PF.
1095          * Otherwise the PF responded to the set-MAC request: check whether
1096          * the netdev MAC was updated to the requested address; if so the
1097          * operation succeeded, otherwise return -EACCES.
1098          */
1099         if (ret < 0)
1100                 return ret;
1101
1102         if (!ret)
1103                 return -EAGAIN;
1104
1105         if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1106                 return -EACCES;
1107
1108         return 0;
1109 }
1110
1111 /**
1112  * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1113  * @netdev: the netdevice
1114  * @addr: address to add
1115  *
1116  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1117  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1118  */
1119 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1120 {
1121         struct iavf_adapter *adapter = netdev_priv(netdev);
1122
1123         if (iavf_add_filter(adapter, addr))
1124                 return 0;
1125         else
1126                 return -ENOMEM;
1127 }
1128
1129 /**
1130  * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1131  * @netdev: the netdevice
1132  * @addr: address to add
1133  *
1134  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1135  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1136  */
1137 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1138 {
1139         struct iavf_adapter *adapter = netdev_priv(netdev);
1140         struct iavf_mac_filter *f;
1141
1142         /* Under some circumstances, we might receive a request to delete
1143          * our own device address from our uc list. Because we store the
1144          * device address in the VSI's MAC/VLAN filter list, we need to ignore
1145          * such requests and not delete our device address from this list.
1146          */
1147         if (ether_addr_equal(addr, netdev->dev_addr))
1148                 return 0;
1149
1150         f = iavf_find_filter(adapter, addr);
1151         if (f) {
1152                 f->remove = true;
1153                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1154         }
1155         return 0;
1156 }
1157
1158 /**
1159  * iavf_set_rx_mode - NDO callback to set the netdev filters
1160  * @netdev: network interface device structure
1161  **/
1162 static void iavf_set_rx_mode(struct net_device *netdev)
1163 {
1164         struct iavf_adapter *adapter = netdev_priv(netdev);
1165
1166         spin_lock_bh(&adapter->mac_vlan_list_lock);
1167         __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1168         __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1169         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1170
1171         if (netdev->flags & IFF_PROMISC &&
1172             !(adapter->flags & IAVF_FLAG_PROMISC_ON))
1173                 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1174         else if (!(netdev->flags & IFF_PROMISC) &&
1175                  adapter->flags & IAVF_FLAG_PROMISC_ON)
1176                 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1177
1178         if (netdev->flags & IFF_ALLMULTI &&
1179             !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
1180                 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1181         else if (!(netdev->flags & IFF_ALLMULTI) &&
1182                  adapter->flags & IAVF_FLAG_ALLMULTI_ON)
1183                 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
1184 }
1185
1186 /**
1187  * iavf_napi_enable_all - enable NAPI on all queue vectors
1188  * @adapter: board private structure
1189  **/
1190 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1191 {
1192         int q_idx;
1193         struct iavf_q_vector *q_vector;
1194         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1195
1196         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1197                 struct napi_struct *napi;
1198
1199                 q_vector = &adapter->q_vectors[q_idx];
1200                 napi = &q_vector->napi;
1201                 napi_enable(napi);
1202         }
1203 }
1204
1205 /**
1206  * iavf_napi_disable_all - disable NAPI on all queue vectors
1207  * @adapter: board private structure
1208  **/
1209 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1210 {
1211         int q_idx;
1212         struct iavf_q_vector *q_vector;
1213         int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1214
1215         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1216                 q_vector = &adapter->q_vectors[q_idx];
1217                 napi_disable(&q_vector->napi);
1218         }
1219 }
1220
1221 /**
1222  * iavf_configure - set up transmit and receive data structures
1223  * @adapter: board private structure
1224  **/
1225 static void iavf_configure(struct iavf_adapter *adapter)
1226 {
1227         struct net_device *netdev = adapter->netdev;
1228         int i;
1229
1230         iavf_set_rx_mode(netdev);
1231
1232         iavf_configure_tx(adapter);
1233         iavf_configure_rx(adapter);
1234         adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1235
1236         for (i = 0; i < adapter->num_active_queues; i++) {
1237                 struct iavf_ring *ring = &adapter->rx_rings[i];
1238
1239                 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1240         }
1241 }
1242
1243 /**
1244  * iavf_up_complete - Finish the last steps of bringing up a connection
1245  * @adapter: board private structure
1246  *
1247  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1248  **/
1249 static void iavf_up_complete(struct iavf_adapter *adapter)
1250 {
1251         iavf_change_state(adapter, __IAVF_RUNNING);
1252         clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1253
1254         iavf_napi_enable_all(adapter);
1255
1256         adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1257         if (CLIENT_ENABLED(adapter))
1258                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1259         mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1260 }
1261
1262 /**
1263  * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not yet sent to
1264  * the PF and mark the others for removal.
1265  * @adapter: board private structure
1266  **/
1267 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1268 {
1269         struct iavf_vlan_filter *vlf, *vlftmp;
1270         struct iavf_mac_filter *f, *ftmp;
1271
1272         spin_lock_bh(&adapter->mac_vlan_list_lock);
1273         /* clear the sync flag on all filters */
1274         __dev_uc_unsync(adapter->netdev, NULL);
1275         __dev_mc_unsync(adapter->netdev, NULL);
1276
1277         /* remove all MAC filters */
1278         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1279                                  list) {
1280                 if (f->add) {
1281                         list_del(&f->list);
1282                         kfree(f);
1283                 } else {
1284                         f->remove = true;
1285                 }
1286         }
1287
1288         /* remove all VLAN filters */
1289         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1290                                  list) {
1291                 if (vlf->add) {
1292                         list_del(&vlf->list);
1293                         kfree(vlf);
1294                 } else {
1295                         vlf->remove = true;
1296                 }
1297         }
1298         spin_unlock_bh(&adapter->mac_vlan_list_lock);
1299 }
1300
1301 /**
1302  * iavf_clear_cloud_filters - Remove cloud filters not yet sent to the PF
1303  * and mark the others for removal.
1304  * @adapter: board private structure
1305  **/
1306 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1307 {
1308         struct iavf_cloud_filter *cf, *cftmp;
1309
1310         /* remove all cloud filters */
1311         spin_lock_bh(&adapter->cloud_filter_list_lock);
1312         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1313                                  list) {
1314                 if (cf->add) {
1315                         list_del(&cf->list);
1316                         kfree(cf);
1317                         adapter->num_cloud_filters--;
1318                 } else {
1319                         cf->del = true;
1320                 }
1321         }
1322         spin_unlock_bh(&adapter->cloud_filter_list_lock);
1323 }
1324
1325 /**
1326  * iavf_clear_fdir_filters - Remove Flow Director filters not yet sent to the
1327  * PF and mark the others for removal.
1328  * @adapter: board private structure
1329  **/
1330 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1331 {
1332         struct iavf_fdir_fltr *fdir, *fdirtmp;
1333
1334         /* remove all Flow Director filters */
1335         spin_lock_bh(&adapter->fdir_fltr_lock);
1336         list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1337                                  list) {
1338                 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1339                         list_del(&fdir->list);
1340                         kfree(fdir);
1341                         adapter->fdir_active_fltr--;
1342                 } else {
1343                         fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1344                 }
1345         }
1346         spin_unlock_bh(&adapter->fdir_fltr_lock);
1347 }
1348
1349 /**
1350  * iavf_clear_adv_rss_conf - Remove advanced RSS configuration not yet sent
1351  * to the PF and mark the others for removal.
1352  * @adapter: board private structure
1353  **/
1354 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1355 {
1356         struct iavf_adv_rss *rss, *rsstmp;
1357
1358         /* remove all advanced RSS configurations */
1359         spin_lock_bh(&adapter->adv_rss_lock);
1360         list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1361                                  list) {
1362                 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1363                         list_del(&rss->list);
1364                         kfree(rss);
1365                 } else {
1366                         rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1367                 }
1368         }
1369         spin_unlock_bh(&adapter->adv_rss_lock);
1370 }
1371
1372 /**
1373  * iavf_down - Shutdown the connection processing
1374  * @adapter: board private structure
1375  *
1376  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1377  **/
1378 void iavf_down(struct iavf_adapter *adapter)
1379 {
1380         struct net_device *netdev = adapter->netdev;
1381
1382         if (adapter->state <= __IAVF_DOWN_PENDING)
1383                 return;
1384
1385         netif_carrier_off(netdev);
1386         netif_tx_disable(netdev);
1387         adapter->link_up = false;
1388         iavf_napi_disable_all(adapter);
1389         iavf_irq_disable(adapter);
1390
1391         iavf_clear_mac_vlan_filters(adapter);
1392         iavf_clear_cloud_filters(adapter);
1393         iavf_clear_fdir_filters(adapter);
1394         iavf_clear_adv_rss_conf(adapter);
1395
1396         if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
1397                 /* cancel any current operation */
1398                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1399                 /* Schedule operations to close down the HW. Don't wait
1400                  * here for this to complete. The watchdog is still running
1401                  * and it will take care of this.
1402                  */
1403                 if (!list_empty(&adapter->mac_filter_list))
1404                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1405                 if (!list_empty(&adapter->vlan_filter_list))
1406                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1407                 if (!list_empty(&adapter->cloud_filter_list))
1408                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1409                 if (!list_empty(&adapter->fdir_list_head))
1410                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1411                 if (!list_empty(&adapter->adv_rss_list_head))
1412                         adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1413                 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1414         }
1415
1416         mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1417 }
1418
1419 /**
1420  * iavf_acquire_msix_vectors - Setup the MSIX capability
1421  * @adapter: board private structure
1422  * @vectors: number of vectors to request
1423  *
1424  * Work with the OS to set up the MSIX vectors needed.
1425  *
1426  * Returns 0 on success, negative on failure
1427  **/
1428 static int
1429 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1430 {
1431         int err, vector_threshold;
1432
1433         /* We'll want at least 3 (vector_threshold):
1434          * 0) Other (Admin Queue and link, mostly)
1435          * 1) TxQ[0] Cleanup
1436          * 2) RxQ[0] Cleanup
1437          */
1438         vector_threshold = MIN_MSIX_COUNT;
1439
1440         /* The more we get, the more we will assign to Tx/Rx Cleanup
1441          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1442          * Right now, we simply care about how many we'll get; we'll
1443          * set them up later while requesting irq's.
1444          */
1445         err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1446                                     vector_threshold, vectors);
1447         if (err < 0) {
1448                 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1449                 kfree(adapter->msix_entries);
1450                 adapter->msix_entries = NULL;
1451                 return err;
1452         }
1453
1454         /* Adjust for only the vectors we'll use, which is minimum
1455          * of max_msix_q_vectors + NONQ_VECS, or the number of
1456          * vectors we were allocated.
1457          */
1458         adapter->num_msix_vectors = err;
1459         return 0;
1460 }
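
/*
 * pci_enable_msix_range() returns the number of vectors actually granted
 * (anywhere between vector_threshold and the requested count), and that
 * grant is what ends up in num_msix_vectors.  The caller typically asks for
 * one vector per active queue pair plus NONQ_VECS; if fewer are granted,
 * iavf_map_rings_to_vectors() simply round-robins the queues across them.
 */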
1461
1462 /**
1463  * iavf_free_queues - Free memory for all rings
1464  * @adapter: board private structure to initialize
1465  *
1466  * Free all of the memory associated with queue pairs.
1467  **/
1468 static void iavf_free_queues(struct iavf_adapter *adapter)
1469 {
1470         if (!adapter->vsi_res)
1471                 return;
1472         adapter->num_active_queues = 0;
1473         kfree(adapter->tx_rings);
1474         adapter->tx_rings = NULL;
1475         kfree(adapter->rx_rings);
1476         adapter->rx_rings = NULL;
1477 }
1478
1479 /**
1480  * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1481  * @adapter: board private structure
1482  *
1483  * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1484  * stripped in certain descriptor fields. Instead of checking the offload
1485  * capability bits in the hot path, cache the location the ring specific
1486  * flags.
1487  */
1488 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1489 {
1490         int i;
1491
1492         for (i = 0; i < adapter->num_active_queues; i++) {
1493                 struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1494                 struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1495
1496                 /* prevent multiple L2TAG bits being set after VFR */
1497                 tx_ring->flags &=
1498                         ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1499                           IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1500                 rx_ring->flags &=
1501                         ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1502                           IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1503
1504                 if (VLAN_ALLOWED(adapter)) {
1505                         tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1506                         rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1507                 } else if (VLAN_V2_ALLOWED(adapter)) {
1508                         struct virtchnl_vlan_supported_caps *stripping_support;
1509                         struct virtchnl_vlan_supported_caps *insertion_support;
1510
1511                         stripping_support =
1512                                 &adapter->vlan_v2_caps.offloads.stripping_support;
1513                         insertion_support =
1514                                 &adapter->vlan_v2_caps.offloads.insertion_support;
1515
1516                         if (stripping_support->outer) {
1517                                 if (stripping_support->outer &
1518                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1519                                         rx_ring->flags |=
1520                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1521                                 else if (stripping_support->outer &
1522                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1523                                         rx_ring->flags |=
1524                                                 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1525                         } else if (stripping_support->inner) {
1526                                 if (stripping_support->inner &
1527                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1528                                         rx_ring->flags |=
1529                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1530                                 else if (stripping_support->inner &
1531                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1532                                         rx_ring->flags |=
1533                                                 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1534                         }
1535
1536                         if (insertion_support->outer) {
1537                                 if (insertion_support->outer &
1538                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1539                                         tx_ring->flags |=
1540                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1541                                 else if (insertion_support->outer &
1542                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1543                                         tx_ring->flags |=
1544                                                 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1545                         } else if (insertion_support->inner) {
1546                                 if (insertion_support->inner &
1547                                     VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1548                                         tx_ring->flags |=
1549                                                 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1550                                 else if (insertion_support->inner &
1551                                          VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1552                                         tx_ring->flags |=
1553                                                 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1554                         }
1555                 }
1556         }
1557 }
1558
1559 /**
1560  * iavf_alloc_queues - Allocate memory for all rings
1561  * @adapter: board private structure to initialize
1562  *
1563  * We allocate one ring per queue at run-time since we don't know the
1564  * number of queues at compile-time.
1566  **/
1567 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1568 {
1569         int i, num_active_queues;
1570
1571         /* If we're reallocating queues during a reset, we don't yet know for
1572          * certain that the PF gave us the number of queues we asked for, but
1573          * we'll assume it did.  Once the basic reset is finished we'll confirm
1574          * this when we start negotiating the config with the PF.
1575          */
1576         if (adapter->num_req_queues)
1577                 num_active_queues = adapter->num_req_queues;
1578         else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1579                  adapter->num_tc)
1580                 num_active_queues = adapter->ch_config.total_qps;
1581         else
1582                 num_active_queues = min_t(int,
1583                                           adapter->vsi_res->num_queue_pairs,
1584                                           (int)(num_online_cpus()));
1585
1587         adapter->tx_rings = kcalloc(num_active_queues,
1588                                     sizeof(struct iavf_ring), GFP_KERNEL);
1589         if (!adapter->tx_rings)
1590                 goto err_out;
1591         adapter->rx_rings = kcalloc(num_active_queues,
1592                                     sizeof(struct iavf_ring), GFP_KERNEL);
1593         if (!adapter->rx_rings)
1594                 goto err_out;
1595
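             /* Initialize each Tx/Rx ring with its queue index, descriptor
              * count and default interrupt throttle rate.
              */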
1596         for (i = 0; i < num_active_queues; i++) {
1597                 struct iavf_ring *tx_ring;
1598                 struct iavf_ring *rx_ring;
1599
1600                 tx_ring = &adapter->tx_rings[i];
1601
1602                 tx_ring->queue_index = i;
1603                 tx_ring->netdev = adapter->netdev;
1604                 tx_ring->dev = &adapter->pdev->dev;
1605                 tx_ring->count = adapter->tx_desc_count;
1606                 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1607                 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1608                         tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1609
1610                 rx_ring = &adapter->rx_rings[i];
1611                 rx_ring->queue_index = i;
1612                 rx_ring->netdev = adapter->netdev;
1613                 rx_ring->dev = &adapter->pdev->dev;
1614                 rx_ring->count = adapter->rx_desc_count;
1615                 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1616         }
1617
1618         adapter->num_active_queues = num_active_queues;
1619
1620         iavf_set_queue_vlan_tag_loc(adapter);
1621
1622         return 0;
1623
1624 err_out:
1625         iavf_free_queues(adapter);
1626         return -ENOMEM;
1627 }
1628
1629 /**
1630  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1631  * @adapter: board private structure to initialize
1632  *
1633  * Attempt to configure the interrupts using the best available
1634  * capabilities of the hardware and the kernel.
1635  **/
1636 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1637 {
1638         int vector, v_budget;
1639         int pairs = 0;
1640         int err = 0;
1641
1642         if (!adapter->vsi_res) {
1643                 err = -EIO;
1644                 goto out;
1645         }
1646         pairs = adapter->num_active_queues;
1647
1648         /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1649          * us much good if we have more vectors than CPUs. However, we already
1650          * limit the total number of queues by the number of CPUs so we do not
1651          * need any further limiting here.
1652          */
1653         v_budget = min_t(int, pairs + NONQ_VECS,
1654                          (int)adapter->vf_res->max_vectors);
1655
1656         adapter->msix_entries = kcalloc(v_budget,
1657                                         sizeof(struct msix_entry), GFP_KERNEL);
1658         if (!adapter->msix_entries) {
1659                 err = -ENOMEM;
1660                 goto out;
1661         }
1662
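             /* Populate the entry index for each MSI-X slot before requesting
              * the vectors.
              */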
1663         for (vector = 0; vector < v_budget; vector++)
1664                 adapter->msix_entries[vector].entry = vector;
1665
1666         err = iavf_acquire_msix_vectors(adapter, v_budget);
1667
1668 out:
1669         netif_set_real_num_rx_queues(adapter->netdev, pairs);
1670         netif_set_real_num_tx_queues(adapter->netdev, pairs);
1671         return err;
1672 }
1673
1674 /**
1675  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1676  * @adapter: board private structure
1677  *
1678  * Return 0 on success, negative on failure
1679  **/
1680 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1681 {
1682         struct iavf_aqc_get_set_rss_key_data *rss_key =
1683                 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1684         struct iavf_hw *hw = &adapter->hw;
1685         enum iavf_status status;
1686
1687         if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1688                 /* bail because we already have a command pending */
1689                 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1690                         adapter->current_op);
1691                 return -EBUSY;
1692         }
1693
1694         status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1695         if (status) {
1696                 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1697                         iavf_stat_str(hw, status),
1698                         iavf_aq_str(hw, hw->aq.asq_last_status));
1699                 return iavf_status_to_errno(status);
1700
1702
1703         status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1704                                      adapter->rss_lut, adapter->rss_lut_size);
1705         if (status) {
1706                 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1707                         iavf_stat_str(hw, status),
1708                         iavf_aq_str(hw, hw->aq.asq_last_status));
1709                 return iavf_status_to_errno(status);
1710         }
1711
1712         return 0;
1714 }
1715
1716 /**
1717  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1718  * @adapter: board private structure
1719  *
1720  * Returns 0 on success, negative on failure
1721  **/
1722 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1723 {
1724         struct iavf_hw *hw = &adapter->hw;
1725         u32 *dw;
1726         u16 i;
1727
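             /* Write the RSS hash key and lookup table into the VF registers,
              * one 32-bit word per register.
              */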
1728         dw = (u32 *)adapter->rss_key;
1729         for (i = 0; i <= adapter->rss_key_size / 4; i++)
1730                 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1731
1732         dw = (u32 *)adapter->rss_lut;
1733         for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1734                 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1735
1736         iavf_flush(hw);
1737
1738         return 0;
1739 }
1740
1741 /**
1742  * iavf_config_rss - Configure RSS keys and lut
1743  * @adapter: board private structure
1744  *
1745  * Returns 0 on success, negative on failure
1746  **/
1747 int iavf_config_rss(struct iavf_adapter *adapter)
1748 {
1750         if (RSS_PF(adapter)) {
1751                 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1752                                         IAVF_FLAG_AQ_SET_RSS_KEY;
1753                 return 0;
1754         } else if (RSS_AQ(adapter)) {
1755                 return iavf_config_rss_aq(adapter);
1756         } else {
1757                 return iavf_config_rss_reg(adapter);
1758         }
1759 }
1760
1761 /**
1762  * iavf_fill_rss_lut - Fill the lut with default values
1763  * @adapter: board private structure
1764  **/
1765 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1766 {
1767         u16 i;
1768
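             /* Spread the LUT entries evenly across all active queues */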
1769         for (i = 0; i < adapter->rss_lut_size; i++)
1770                 adapter->rss_lut[i] = i % adapter->num_active_queues;
1771 }
1772
1773 /**
1774  * iavf_init_rss - Prepare for RSS
1775  * @adapter: board private structure
1776  *
1777  * Return 0 on success, negative on failure
1778  **/
1779 static int iavf_init_rss(struct iavf_adapter *adapter)
1780 {
1781         struct iavf_hw *hw = &adapter->hw;
1782
1783         if (!RSS_PF(adapter)) {
1784                 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1785                 if (adapter->vf_res->vf_cap_flags &
1786                     VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1787                         adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1788                 else
1789                         adapter->hena = IAVF_DEFAULT_RSS_HENA;
1790
1791                 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1792                 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1793         }
1794
1795         iavf_fill_rss_lut(adapter);
1796         netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1797
1798         return iavf_config_rss(adapter);
1799 }
1800
1801 /**
1802  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1803  * @adapter: board private structure to initialize
1804  *
1805  * We allocate one q_vector per queue interrupt.  If allocation fails we
1806  * return -ENOMEM.
1807  **/
1808 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1809 {
1810         int q_idx = 0, num_q_vectors;
1811         struct iavf_q_vector *q_vector;
1812
1813         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1814         adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1815                                      GFP_KERNEL);
1816         if (!adapter->q_vectors)
1817                 return -ENOMEM;
1818
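             /* Set up one q_vector per queue interrupt and register its NAPI
              * poll handler.
              */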
1819         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1820                 q_vector = &adapter->q_vectors[q_idx];
1821                 q_vector->adapter = adapter;
1822                 q_vector->vsi = &adapter->vsi;
1823                 q_vector->v_idx = q_idx;
1824                 q_vector->reg_idx = q_idx;
1825                 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1826                 netif_napi_add(adapter->netdev, &q_vector->napi,
1827                                iavf_napi_poll);
1828         }
1829
1830         return 0;
1831 }
1832
1833 /**
1834  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1835  * @adapter: board private structure to initialize
1836  *
1837  * This function frees the memory allocated to the q_vectors.  In addition if
1838  * NAPI is enabled it will delete any references to the NAPI struct prior
1839  * to freeing the q_vector.
1840  **/
1841 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1842 {
1843         int q_idx, num_q_vectors;
1844         int napi_vectors;
1845
1846         if (!adapter->q_vectors)
1847                 return;
1848
1849         num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1850         napi_vectors = adapter->num_active_queues;
1851
1852         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1853                 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1854
1855                 if (q_idx < napi_vectors)
1856                         netif_napi_del(&q_vector->napi);
1857         }
1858         kfree(adapter->q_vectors);
1859         adapter->q_vectors = NULL;
1860 }
1861
1862 /**
1863  * iavf_reset_interrupt_capability - Reset MSIX setup
1864  * @adapter: board private structure
1865  *
1866  **/
1867 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1868 {
1869         if (!adapter->msix_entries)
1870                 return;
1871
1872         pci_disable_msix(adapter->pdev);
1873         kfree(adapter->msix_entries);
1874         adapter->msix_entries = NULL;
1875 }
1876
1877 /**
1878  * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1879  * @adapter: board private structure to initialize
1880  *
1881  **/
1882 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1883 {
1884         int err;
1885
1886         err = iavf_alloc_queues(adapter);
1887         if (err) {
1888                 dev_err(&adapter->pdev->dev,
1889                         "Unable to allocate memory for queues\n");
1890                 goto err_alloc_queues;
1891         }
1892
1893         rtnl_lock();
1894         err = iavf_set_interrupt_capability(adapter);
1895         rtnl_unlock();
1896         if (err) {
1897                 dev_err(&adapter->pdev->dev,
1898                         "Unable to setup interrupt capabilities\n");
1899                 goto err_set_interrupt;
1900         }
1901
1902         err = iavf_alloc_q_vectors(adapter);
1903         if (err) {
1904                 dev_err(&adapter->pdev->dev,
1905                         "Unable to allocate memory for queue vectors\n");
1906                 goto err_alloc_q_vectors;
1907         }
1908
1909         /* If we've made it this far with the ADq flag set, then we haven't
1910          * bailed out anywhere in the middle. ADq isn't just enabled; the actual
1911          * resources have been allocated in the reset path.
1912          * Now we can truly claim that ADq is enabled.
1913          */
1914         if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1915             adapter->num_tc)
1916                 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created\n",
1917                          adapter->num_tc);
1918
1919         dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
1920                  (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1921                  adapter->num_active_queues);
1922
1923         return 0;
1924 err_alloc_q_vectors:
1925         iavf_reset_interrupt_capability(adapter);
1926 err_set_interrupt:
1927         iavf_free_queues(adapter);
1928 err_alloc_queues:
1929         return err;
1930 }
1931
1932 /**
1933  * iavf_free_rss - Free memory used by RSS structs
1934  * @adapter: board private structure
1935  **/
1936 static void iavf_free_rss(struct iavf_adapter *adapter)
1937 {
1938         kfree(adapter->rss_key);
1939         adapter->rss_key = NULL;
1940
1941         kfree(adapter->rss_lut);
1942         adapter->rss_lut = NULL;
1943 }
1944
1945 /**
1946  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1947  * @adapter: board private structure
1948  *
1949  * Returns 0 on success, negative on failure
1950  **/
1951 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1952 {
1953         struct net_device *netdev = adapter->netdev;
1954         int err;
1955
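             /* Tear down the existing interrupts, vectors and queues before
              * rebuilding them with the new configuration.
              */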
1956         if (netif_running(netdev))
1957                 iavf_free_traffic_irqs(adapter);
1958         iavf_free_misc_irq(adapter);
1959         iavf_reset_interrupt_capability(adapter);
1960         iavf_free_q_vectors(adapter);
1961         iavf_free_queues(adapter);
1962
1963         err = iavf_init_interrupt_scheme(adapter);
1964         if (err)
1965                 goto err;
1966
1967         netif_tx_stop_all_queues(netdev);
1968
1969         err = iavf_request_misc_irq(adapter);
1970         if (err)
1971                 goto err;
1972
1973         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1974
1975         iavf_map_rings_to_vectors(adapter);
1976 err:
1977         return err;
1978 }
1979
1980 /**
1981  * iavf_process_aq_command - process aq_required flags
1982  * and send the corresponding aq command
1983  * @adapter: pointer to iavf adapter structure
1984  *
1985  * Returns 0 on success
1986  * Returns an error code if no command was sent
1987  * or if the command failed.
1988  **/
1989 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1990 {
1991         if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1992                 return iavf_send_vf_config_msg(adapter);
1993         if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
1994                 return iavf_send_vf_offload_vlan_v2_msg(adapter);
1995         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1996                 iavf_disable_queues(adapter);
1997                 return 0;
1998         }
1999
2000         if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2001                 iavf_map_queues(adapter);
2002                 return 0;
2003         }
2004
2005         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2006                 iavf_add_ether_addrs(adapter);
2007                 return 0;
2008         }
2009
2010         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2011                 iavf_add_vlans(adapter);
2012                 return 0;
2013         }
2014
2015         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2016                 iavf_del_ether_addrs(adapter);
2017                 return 0;
2018         }
2019
2020         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2021                 iavf_del_vlans(adapter);
2022                 return 0;
2023         }
2024
2025         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2026                 iavf_enable_vlan_stripping(adapter);
2027                 return 0;
2028         }
2029
2030         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2031                 iavf_disable_vlan_stripping(adapter);
2032                 return 0;
2033         }
2034
2035         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2036                 iavf_configure_queues(adapter);
2037                 return 0;
2038         }
2039
2040         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2041                 iavf_enable_queues(adapter);
2042                 return 0;
2043         }
2044
2045         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2046                 /* This message goes straight to the firmware, not the
2047                  * PF, so we don't have to set current_op as we will
2048                  * not get a response through the ARQ.
2049                  */
2050                 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2051                 return 0;
2052         }
2053         if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2054                 iavf_get_hena(adapter);
2055                 return 0;
2056         }
2057         if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2058                 iavf_set_hena(adapter);
2059                 return 0;
2060         }
2061         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2062                 iavf_set_rss_key(adapter);
2063                 return 0;
2064         }
2065         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2066                 iavf_set_rss_lut(adapter);
2067                 return 0;
2068         }
2069
2070         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2071                 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2072                                        FLAG_VF_MULTICAST_PROMISC);
2073                 return 0;
2074         }
2075
2076         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2077                 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2078                 return 0;
2079         }
2080         if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2081             (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2082                 iavf_set_promiscuous(adapter, 0);
2083                 return 0;
2084         }
2085
2086         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2087                 iavf_enable_channels(adapter);
2088                 return 0;
2089         }
2090
2091         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2092                 iavf_disable_channels(adapter);
2093                 return 0;
2094         }
2095         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2096                 iavf_add_cloud_filter(adapter);
2097                 return 0;
2098         }
2099
2100         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2101                 iavf_del_cloud_filter(adapter);
2102                 return 0;
2103         }
2112         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2113                 iavf_add_fdir_filter(adapter);
2114                 return 0;
2115         }
2116         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2117                 iavf_del_fdir_filter(adapter);
2118                 return 0;
2119         }
2120         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2121                 iavf_add_adv_rss_cfg(adapter);
2122                 return 0;
2123         }
2124         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2125                 iavf_del_adv_rss_cfg(adapter);
2126                 return 0;
2127         }
2128         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2129                 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2130                 return 0;
2131         }
2132         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2133                 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2134                 return 0;
2135         }
2136         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2137                 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2138                 return 0;
2139         }
2140         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2141                 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2142                 return 0;
2143         }
2144         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2145                 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2146                 return 0;
2147         }
2148         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2149                 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2150                 return 0;
2151         }
2152         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2153                 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2154                 return 0;
2155         }
2156         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2157                 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2158                 return 0;
2159         }
2160
2161         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2162                 iavf_request_stats(adapter);
2163                 return 0;
2164         }
2165
2166         return -EAGAIN;
2167 }
2168
2169 /**
2170  * iavf_set_vlan_offload_features - set VLAN offload configuration
2171  * @adapter: board private structure
2172  * @prev_features: previous features used for comparison
2173  * @features: updated features used for configuration
2174  *
2175  * Set the aq_required bit(s) based on the requested features passed in to
2176  * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2177  * the watchdog if any changes are requested to expedite the request via
2178  * virtchnl.
2179  **/
2180 void
2181 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2182                                netdev_features_t prev_features,
2183                                netdev_features_t features)
2184 {
2185         bool enable_stripping = true, enable_insertion = true;
2186         u16 vlan_ethertype = 0;
2187         u64 aq_required = 0;
2188
2189         /* keep cases separate because one ethertype for offloads can be
2190          * enabled at the same time as another is disabled, so check for an
2191          * enabled ethertype first, then check for disabled. Default to
2192          * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2193          * stripping.
2194          */
2195         if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2196                 vlan_ethertype = ETH_P_8021AD;
2197         else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2198                 vlan_ethertype = ETH_P_8021Q;
2199         else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2200                 vlan_ethertype = ETH_P_8021AD;
2201         else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2202                 vlan_ethertype = ETH_P_8021Q;
2203         else
2204                 vlan_ethertype = ETH_P_8021Q;
2205
2206         if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2207                 enable_stripping = false;
2208         if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2209                 enable_insertion = false;
2210
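             /* Translate the requested features into aq_required flags based on
              * which VLAN offload capability was negotiated with the PF.
              */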
2211         if (VLAN_ALLOWED(adapter)) {
2212                 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2213                  * stripping via virtchnl. VLAN insertion can be toggled on the
2214                  * netdev, but it doesn't require a virtchnl message
2215                  */
2216                 if (enable_stripping)
2217                         aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2218                 else
2219                         aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2220
2221         } else if (VLAN_V2_ALLOWED(adapter)) {
2222                 switch (vlan_ethertype) {
2223                 case ETH_P_8021Q:
2224                         if (enable_stripping)
2225                                 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2226                         else
2227                                 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2228
2229                         if (enable_insertion)
2230                                 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2231                         else
2232                                 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2233                         break;
2234                 case ETH_P_8021AD:
2235                         if (enable_stripping)
2236                                 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2237                         else
2238                                 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2239
2240                         if (enable_insertion)
2241                                 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2242                         else
2243                                 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2244                         break;
2245                 }
2246         }
2247
2248         if (aq_required) {
2249                 adapter->aq_required |= aq_required;
2250                 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
2251         }
2252 }
2253
2254 /**
2255  * iavf_startup - first step of driver startup
2256  * @adapter: board private structure
2257  *
2258  * Function processes the __IAVF_STARTUP driver state.
2259  * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
2260  * on failure the state is changed to __IAVF_INIT_FAILED
2261  **/
2262 static void iavf_startup(struct iavf_adapter *adapter)
2263 {
2264         struct pci_dev *pdev = adapter->pdev;
2265         struct iavf_hw *hw = &adapter->hw;
2266         enum iavf_status status;
2267         int ret;
2268
2269         WARN_ON(adapter->state != __IAVF_STARTUP);
2270
2271         /* driver loaded, probe complete */
2272         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2273         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2274         status = iavf_set_mac_type(hw);
2275         if (status) {
2276                 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2277                 goto err;
2278         }
2279
2280         ret = iavf_check_reset_complete(hw);
2281         if (ret) {
2282                 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2283                          ret);
2284                 goto err;
2285         }
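             /* Size the admin send/receive queues before initializing them */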
2286         hw->aq.num_arq_entries = IAVF_AQ_LEN;
2287         hw->aq.num_asq_entries = IAVF_AQ_LEN;
2288         hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2289         hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2290
2291         status = iavf_init_adminq(hw);
2292         if (status) {
2293                 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2294                         status);
2295                 goto err;
2296         }
2297         ret = iavf_send_api_ver(adapter);
2298         if (ret) {
2299                 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2300                 iavf_shutdown_adminq(hw);
2301                 goto err;
2302         }
2303         iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2304         return;
2305 err:
2306         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2307 }
2308
2309 /**
2310  * iavf_init_version_check - second step of driver startup
2311  * @adapter: board private structure
2312  *
2313  * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2314  * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
2315  * on failure the state is changed to __IAVF_INIT_FAILED
2316  **/
2317 static void iavf_init_version_check(struct iavf_adapter *adapter)
2318 {
2319         struct pci_dev *pdev = adapter->pdev;
2320         struct iavf_hw *hw = &adapter->hw;
2321         int err = -EAGAIN;
2322
2323         WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2324
2325         if (!iavf_asq_done(hw)) {
2326                 dev_err(&pdev->dev, "Admin queue command never completed\n");
2327                 iavf_shutdown_adminq(hw);
2328                 iavf_change_state(adapter, __IAVF_STARTUP);
2329                 goto err;
2330         }
2331
2332         /* aq msg sent, awaiting reply */
2333         err = iavf_verify_api_ver(adapter);
2334         if (err) {
2335                 if (err == -EALREADY)
2336                         err = iavf_send_api_ver(adapter);
2337                 else
2338                         dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2339                                 adapter->pf_version.major,
2340                                 adapter->pf_version.minor,
2341                                 VIRTCHNL_VERSION_MAJOR,
2342                                 VIRTCHNL_VERSION_MINOR);
2343                 goto err;
2344         }
2345         err = iavf_send_vf_config_msg(adapter);
2346         if (err) {
2347                 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2348                         err);
2349                 goto err;
2350         }
2351         iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2352         return;
2353 err:
2354         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2355 }
2356
2357 /**
2358  * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2359  * @adapter: board private structure
2360  */
2361 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2362 {
2363         int i, num_req_queues = adapter->num_req_queues;
2364         struct iavf_vsi *vsi = &adapter->vsi;
2365
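             /* Locate the SR-IOV LAN VSI among the VSIs reported by the PF */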
2366         for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2367                 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2368                         adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2369         }
2370         if (!adapter->vsi_res) {
2371                 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2372                 return -ENODEV;
2373         }
2374
2375         if (num_req_queues &&
2376             num_req_queues > adapter->vsi_res->num_queue_pairs) {
2377                 /* Problem.  The PF gave us fewer queues than what we had
2378                  * negotiated in our request.  Need a reset to see if we can
2379                  * get back to a working state.
2380                  */
2381                 dev_err(&adapter->pdev->dev,
2382                         "Requested %d queues, but PF only gave us %d.\n",
2383                         num_req_queues,
2384                         adapter->vsi_res->num_queue_pairs);
2385                 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2386                 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2387                 iavf_schedule_reset(adapter);
2388
2389                 return -EAGAIN;
2390         }
2391         adapter->num_req_queues = 0;
2392         adapter->vsi.id = adapter->vsi_res->vsi_id;
2393
2394         adapter->vsi.back = adapter;
2395         adapter->vsi.base_vector = 1;
2396         vsi->netdev = adapter->netdev;
2397         vsi->qs_handle = adapter->vsi_res->qset_handle;
2398         if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2399                 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2400                 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2401         } else {
2402                 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2403                 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2404         }
2405
2406         return 0;
2407 }
2408
2409 /**
2410  * iavf_init_get_resources - third step of driver startup
2411  * @adapter: board private structure
2412  *
2413  * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2414  * continues the driver initialization procedure.
2415  * On success the state is changed to __IAVF_INIT_EXTENDED_CAPS;
2416  * on failure the state is changed to __IAVF_INIT_FAILED
2417  **/
2418 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2419 {
2420         struct pci_dev *pdev = adapter->pdev;
2421         struct iavf_hw *hw = &adapter->hw;
2422         int err;
2423
2424         WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2425         /* aq msg sent, awaiting reply */
2426         if (!adapter->vf_res) {
2427                 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2428                                           GFP_KERNEL);
2429                 if (!adapter->vf_res) {
2430                         err = -ENOMEM;
2431                         goto err;
2432                 }
2433         }
2434         err = iavf_get_vf_config(adapter);
2435         if (err == -EALREADY) {
2436                 err = iavf_send_vf_config_msg(adapter);
2437                 goto err;
2438         } else if (err == -EINVAL) {
2439                 /* We only get -EINVAL if the device is in a very bad
2440                  * state or if we've been disabled for previous bad
2441                  * behavior. Either way, we're done now.
2442                  */
2443                 iavf_shutdown_adminq(hw);
2444                 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2445                 return;
2446         }
2447         if (err) {
2448                 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2449                 goto err_alloc;
2450         }
2451
2452         err = iavf_parse_vf_resource_msg(adapter);
2453         if (err) {
2454                 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2455                         err);
2456                 goto err_alloc;
2457         }
2458         /* Some features require additional messages to negotiate extended
2459          * capabilities. These are processed in sequence by the
2460          * __IAVF_INIT_EXTENDED_CAPS driver state.
2461          */
2462         adapter->extended_caps = IAVF_EXTENDED_CAPS;
2463
2464         iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2465         return;
2466
2467 err_alloc:
2468         kfree(adapter->vf_res);
2469         adapter->vf_res = NULL;
2470 err:
2471         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2472 }
2473
2474 /**
2475  * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2476  * @adapter: board private structure
2477  *
2478  * Function processes send of the extended VLAN V2 capability message to the
2479  * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2480  * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2481  */
2482 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2483 {
2484         int ret;
2485
2486         WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2487
2488         ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2489         if (ret == -EOPNOTSUPP) {
2490                 /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
2491                  * we did not send the capability exchange message and do not
2492                  * expect a response.
2493                  */
2494                 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2495         }
2496
2497         /* We sent the message, so move on to the next step */
2498         adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2499 }
2500
2501 /**
2502  * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2503  * @adapter: board private structure
2504  *
2505  * Function processes receipt of the extended VLAN V2 capability message from
2506  * the PF.
2507  **/
2508 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2509 {
2510         int ret;
2511
2512         WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2513
2514         memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2515
2516         ret = iavf_get_vf_vlan_v2_caps(adapter);
2517         if (ret)
2518                 goto err;
2519
2520         /* We've processed receipt of the VLAN V2 caps message */
2521         adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2522         return;
2523 err:
2524         /* We didn't receive a reply. Make sure we try sending again when
2525          * __IAVF_INIT_FAILED attempts to recover.
2526          */
2527         adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2528         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2529 }
2530
2531 /**
2532  * iavf_init_process_extended_caps - Part of driver startup
2533  * @adapter: board private structure
2534  *
2535  * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2536  * handles negotiating capabilities for features which require an additional
2537  * message.
2538  *
2539  * Once all extended capabilities exchanges are finished, the driver will
2540  * transition into __IAVF_INIT_CONFIG_ADAPTER.
2541  */
2542 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2543 {
2544         WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2545
2546         /* Process capability exchange for VLAN V2 */
2547         if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2548                 iavf_init_send_offload_vlan_v2_caps(adapter);
2549                 return;
2550         } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2551                 iavf_init_recv_offload_vlan_v2_caps(adapter);
2552                 return;
2553         }
2554
2555         /* When we reach here, no further extended capabilities exchanges are
2556          * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2557          */
2558         iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2559 }
2560
2561 /**
2562  * iavf_init_config_adapter - last part of driver startup
2563  * @adapter: board private structure
2564  *
2565  * After all the supported capabilities are negotiated, then the
2566  * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2567  */
2568 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2569 {
2570         struct net_device *netdev = adapter->netdev;
2571         struct pci_dev *pdev = adapter->pdev;
2572         int err;
2573
2574         WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2575
2576         if (iavf_process_config(adapter))
2577                 goto err;
2578
2579         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2580
2581         adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2582
2583         netdev->netdev_ops = &iavf_netdev_ops;
2584         iavf_set_ethtool_ops(netdev);
2585         netdev->watchdog_timeo = 5 * HZ;
2586
2587         /* MTU range: 68 - 9710 */
2588         netdev->min_mtu = ETH_MIN_MTU;
2589         netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2590
2591         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2592                 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2593                          adapter->hw.mac.addr);
2594                 eth_hw_addr_random(netdev);
2595                 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2596         } else {
2597                 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2598                 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2599         }
2600
2601         adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2602         adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2603         err = iavf_init_interrupt_scheme(adapter);
2604         if (err)
2605                 goto err_sw_init;
2606         iavf_map_rings_to_vectors(adapter);
2607         if (adapter->vf_res->vf_cap_flags &
2608                 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2609                 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2610
2611         err = iavf_request_misc_irq(adapter);
2612         if (err)
2613                 goto err_sw_init;
2614
2615         netif_carrier_off(netdev);
2616         adapter->link_up = false;
2617
2618         /* hold the RTNL lock to prevent any callbacks after device registration
2619          * until the driver state is set to __IAVF_DOWN
2620          */
2621         rtnl_lock();
2622         if (!adapter->netdev_registered) {
2623                 err = register_netdevice(netdev);
2624                 if (err) {
2625                         rtnl_unlock();
2626                         goto err_register;
2627                 }
2628         }
2629
2630         adapter->netdev_registered = true;
2631
2632         netif_tx_stop_all_queues(netdev);
2633         if (CLIENT_ALLOWED(adapter)) {
2634                 err = iavf_lan_add_device(adapter);
2635                 if (err)
2636                         dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2637                                  err);
2638         }
2639         dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2640         if (netdev->features & NETIF_F_GRO)
2641                 dev_info(&pdev->dev, "GRO is enabled\n");
2642
2643         iavf_change_state(adapter, __IAVF_DOWN);
2644         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2645         rtnl_unlock();
2646
2647         iavf_misc_irq_enable(adapter);
2648         wake_up(&adapter->down_waitqueue);
2649
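             /* Allocate the RSS key and lookup table; their sizes were
              * determined when parsing the VF resources.
              */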
2650         adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2651         adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2652         if (!adapter->rss_key || !adapter->rss_lut) {
2653                 err = -ENOMEM;
2654                 goto err_mem;
2655         }
2656         if (RSS_AQ(adapter))
2657                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2658         else
2659                 iavf_init_rss(adapter);
2660
2661         if (VLAN_V2_ALLOWED(adapter))
2662                 /* request initial VLAN offload settings */
2663                 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2664
2665         return;
2666 err_mem:
2667         iavf_free_rss(adapter);
2668 err_register:
2669         iavf_free_misc_irq(adapter);
2670 err_sw_init:
2671         iavf_reset_interrupt_capability(adapter);
2672 err:
2673         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2674 }
2675
2676 /**
2677  * iavf_watchdog_task - Periodic call-back task
2678  * @work: pointer to work_struct
2679  **/
2680 static void iavf_watchdog_task(struct work_struct *work)
2681 {
2682         struct iavf_adapter *adapter = container_of(work,
2683                                                     struct iavf_adapter,
2684                                                     watchdog_task.work);
2685         struct iavf_hw *hw = &adapter->hw;
2686         u32 reg_val;
2687
2688         if (!mutex_trylock(&adapter->crit_lock)) {
2689                 if (adapter->state == __IAVF_REMOVE)
2690                         return;
2691
2692                 goto restart_watchdog;
2693         }
2694
2695         if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
2696             adapter->netdev_registered &&
2697             !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
2698             rtnl_trylock()) {
2699                 netdev_update_features(adapter->netdev);
2700                 rtnl_unlock();
2701                 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2702         }
2703
2704         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2705                 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2706
2707         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2708                 adapter->aq_required = 0;
2709                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2710                 mutex_unlock(&adapter->crit_lock);
2711                 queue_work(adapter->wq, &adapter->reset_task);
2712                 return;
2713         }
2714
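             /* Drive the init/recovery state machine; only the run-time states
              * fall through to the hardware reset check below.
              */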
2715         switch (adapter->state) {
2716         case __IAVF_STARTUP:
2717                 iavf_startup(adapter);
2718                 mutex_unlock(&adapter->crit_lock);
2719                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2720                                    msecs_to_jiffies(30));
2721                 return;
2722         case __IAVF_INIT_VERSION_CHECK:
2723                 iavf_init_version_check(adapter);
2724                 mutex_unlock(&adapter->crit_lock);
2725                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2726                                    msecs_to_jiffies(30));
2727                 return;
2728         case __IAVF_INIT_GET_RESOURCES:
2729                 iavf_init_get_resources(adapter);
2730                 mutex_unlock(&adapter->crit_lock);
2731                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2732                                    msecs_to_jiffies(1));
2733                 return;
2734         case __IAVF_INIT_EXTENDED_CAPS:
2735                 iavf_init_process_extended_caps(adapter);
2736                 mutex_unlock(&adapter->crit_lock);
2737                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2738                                    msecs_to_jiffies(1));
2739                 return;
2740         case __IAVF_INIT_CONFIG_ADAPTER:
2741                 iavf_init_config_adapter(adapter);
2742                 mutex_unlock(&adapter->crit_lock);
2743                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2744                                    msecs_to_jiffies(1));
2745                 return;
2746         case __IAVF_INIT_FAILED:
2747                 if (test_bit(__IAVF_IN_REMOVE_TASK,
2748                              &adapter->crit_section)) {
2749                         /* Do not update the state and do not reschedule
2750                          * watchdog task, iavf_remove should handle this state
2751                          * as it can loop forever
2752                          */
2753                         mutex_unlock(&adapter->crit_lock);
2754                         return;
2755                 }
2756                 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2757                         dev_err(&adapter->pdev->dev,
2758                                 "Failed to communicate with PF; waiting before retry\n");
2759                         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2760                         iavf_shutdown_adminq(hw);
2761                         mutex_unlock(&adapter->crit_lock);
2762                         queue_delayed_work(adapter->wq,
2763                                            &adapter->watchdog_task, (5 * HZ));
2764                         return;
2765                 }
2766                 /* Try again from failed step */
2767                 iavf_change_state(adapter, adapter->last_state);
2768                 mutex_unlock(&adapter->crit_lock);
2769                 queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
2770                 return;
2771         case __IAVF_COMM_FAILED:
2772                 if (test_bit(__IAVF_IN_REMOVE_TASK,
2773                              &adapter->crit_section)) {
2774                         /* Set state to __IAVF_INIT_FAILED and perform remove
2775                          * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2776                          * doesn't bring the state back to __IAVF_COMM_FAILED.
2777                          */
2778                         iavf_change_state(adapter, __IAVF_INIT_FAILED);
2779                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2780                         mutex_unlock(&adapter->crit_lock);
2781                         return;
2782                 }
2783                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2784                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2785                 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2786                     reg_val == VIRTCHNL_VFR_COMPLETED) {
2787                         /* A chance for redemption! */
2788                         dev_err(&adapter->pdev->dev,
2789                                 "Hardware came out of reset. Attempting reinit.\n");
2790                         /* When init task contacts the PF and
2791                          * gets everything set up again, it'll restart the
2792                          * watchdog for us. Down, boy. Sit. Stay. Woof.
2793                          */
2794                         iavf_change_state(adapter, __IAVF_STARTUP);
2795                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2796                 }
2797                 adapter->aq_required = 0;
2798                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2799                 mutex_unlock(&adapter->crit_lock);
2800                 queue_delayed_work(adapter->wq,
2801                                    &adapter->watchdog_task,
2802                                    msecs_to_jiffies(10));
2803                 return;
2804         case __IAVF_RESETTING:
2805                 mutex_unlock(&adapter->crit_lock);
2806                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2807                                    HZ * 2);
2808                 return;
2809         case __IAVF_DOWN:
2810         case __IAVF_DOWN_PENDING:
2811         case __IAVF_TESTING:
2812         case __IAVF_RUNNING:
2813                 if (adapter->current_op) {
2814                         if (!iavf_asq_done(hw)) {
2815                                 dev_dbg(&adapter->pdev->dev,
2816                                         "Admin queue timeout\n");
2817                                 iavf_send_api_ver(adapter);
2818                         }
2819                 } else {
2820                         int ret = iavf_process_aq_command(adapter);
2821
2822                         /* An error will be returned if no commands were
2823                          * processed; use this opportunity to update stats
2824                          * if the error isn't -EOPNOTSUPP
2825                          */
2826                         if (ret && ret != -EOPNOTSUPP &&
2827                             adapter->state == __IAVF_RUNNING)
2828                                 iavf_request_stats(adapter);
2829                 }
2830                 if (adapter->state == __IAVF_RUNNING)
2831                         iavf_detect_recover_hung(&adapter->vsi);
2832                 break;
2833         case __IAVF_REMOVE:
2834         default:
2835                 mutex_unlock(&adapter->crit_lock);
2836                 return;
2837         }
2838
2839         /* check for hw reset */
2840         reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2841         if (!reg_val) {
2842                 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2843                 adapter->aq_required = 0;
2844                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2845                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2846                 queue_work(adapter->wq, &adapter->reset_task);
2847                 mutex_unlock(&adapter->crit_lock);
2848                 queue_delayed_work(adapter->wq,
2849                                    &adapter->watchdog_task, HZ * 2);
2850                 return;
2851         }
2852
2853         schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2854         mutex_unlock(&adapter->crit_lock);
2855 restart_watchdog:
2856         if (adapter->state >= __IAVF_DOWN)
2857                 queue_work(adapter->wq, &adapter->adminq_task);
2858         if (adapter->aq_required)
2859                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2860                                    msecs_to_jiffies(20));
2861         else
2862                 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2863                                    HZ * 2);
2864 }
2865
2866 /**
2867  * iavf_disable_vf - disable VF
2868  * @adapter: board private structure
2869  *
2870  * Set communication failed flag and free all resources.
2871  * NOTE: This function is expected to be called with crit_lock being held.
2872  **/
2873 static void iavf_disable_vf(struct iavf_adapter *adapter)
2874 {
2875         struct iavf_mac_filter *f, *ftmp;
2876         struct iavf_vlan_filter *fv, *fvtmp;
2877         struct iavf_cloud_filter *cf, *cftmp;
2878
2879         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2880
2881         /* We don't use netif_running() because it may be true prior to
2882          * ndo_open() returning, so we can't assume it means all our open
2883          * tasks have finished, since we're not holding the rtnl_lock here.
2884          */
2885         if (adapter->state == __IAVF_RUNNING) {
2886                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2887                 netif_carrier_off(adapter->netdev);
2888                 netif_tx_disable(adapter->netdev);
2889                 adapter->link_up = false;
2890                 iavf_napi_disable_all(adapter);
2891                 iavf_irq_disable(adapter);
2892                 iavf_free_traffic_irqs(adapter);
2893                 iavf_free_all_tx_resources(adapter);
2894                 iavf_free_all_rx_resources(adapter);
2895         }
2896
2897         spin_lock_bh(&adapter->mac_vlan_list_lock);
2898
2899         /* Delete all of the filters */
2900         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2901                 list_del(&f->list);
2902                 kfree(f);
2903         }
2904
2905         list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2906                 list_del(&fv->list);
2907                 kfree(fv);
2908         }
2909
2910         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2911
2912         spin_lock_bh(&adapter->cloud_filter_list_lock);
2913         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2914                 list_del(&cf->list);
2915                 kfree(cf);
2916                 adapter->num_cloud_filters--;
2917         }
2918         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2919
2920         iavf_free_misc_irq(adapter);
2921         iavf_reset_interrupt_capability(adapter);
2922         iavf_free_q_vectors(adapter);
2923         iavf_free_queues(adapter);
2924         memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2925         iavf_shutdown_adminq(&adapter->hw);
2926         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2927         iavf_change_state(adapter, __IAVF_DOWN);
2928         wake_up(&adapter->down_waitqueue);
2929         dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2930 }
2931
2932 /**
2933  * iavf_reset_task - Call-back task to handle hardware reset
2934  * @work: pointer to work_struct
2935  *
2936  * During reset we need to shut down and reinitialize the admin queue
2937  * before we can use it to communicate with the PF again. We also clear
2938  * and reinit the rings because that context is lost as well.
2939  **/
2940 static void iavf_reset_task(struct work_struct *work)
2941 {
2942         struct iavf_adapter *adapter = container_of(work,
2943                                                       struct iavf_adapter,
2944                                                       reset_task);
2945         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2946         struct net_device *netdev = adapter->netdev;
2947         struct iavf_hw *hw = &adapter->hw;
2948         struct iavf_mac_filter *f, *ftmp;
2949         struct iavf_cloud_filter *cf;
2950         enum iavf_status status;
2951         u32 reg_val;
2952         int i = 0, err;
2953         bool running;
2954
2955         /* Detach interface to avoid subsequent NDO callbacks */
2956         rtnl_lock();
2957         netif_device_detach(netdev);
2958         rtnl_unlock();
2959
2960         /* When the device is being removed it doesn't make sense to run
2961          * the reset task; just return in that case.
2962          */
2963         if (!mutex_trylock(&adapter->crit_lock)) {
2964                 if (adapter->state != __IAVF_REMOVE)
2965                         queue_work(adapter->wq, &adapter->reset_task);
2966
2967                 goto reset_finish;
2968         }
2969
2970         while (!mutex_trylock(&adapter->client_lock))
2971                 usleep_range(500, 1000);
2972         if (CLIENT_ENABLED(adapter)) {
2973                 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2974                                     IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2975                                     IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2976                                     IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2977                 cancel_delayed_work_sync(&adapter->client_task);
2978                 iavf_notify_client_close(&adapter->vsi, true);
2979         }
2980         iavf_misc_irq_disable(adapter);
2981         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2982                 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2983                 /* Restart the AQ here. If we have been reset but didn't
2984                  * detect it, or if the PF had to reinit, our AQ will be hosed.
2985                  */
2986                 iavf_shutdown_adminq(hw);
2987                 iavf_init_adminq(hw);
2988                 iavf_request_reset(adapter);
2989         }
2990         adapter->flags |= IAVF_FLAG_RESET_PENDING;
2991
2992         /* poll until we see the reset actually happen */
2993         for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2994                 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2995                           IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2996                 if (!reg_val)
2997                         break;
2998                 usleep_range(5000, 10000);
2999         }
3000         if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
3001                 dev_info(&adapter->pdev->dev, "Never saw reset\n");
3002                 goto continue_reset; /* act like the reset happened */
3003         }
3004
3005         /* wait until the reset is complete and the PF is responding to us */
3006         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3007                 /* sleep first to make sure a minimum wait time is met */
3008                 msleep(IAVF_RESET_WAIT_MS);
3009
3010                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3011                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3012                 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3013                         break;
3014         }
3015
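             /* Bus mastering and MSI state may have been lost across the
              * reset; restore them before the device is used again.
              */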
3016         pci_set_master(adapter->pdev);
3017         pci_restore_msi_state(adapter->pdev);
3018
3019         if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3020                 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3021                         reg_val);
3022                 iavf_disable_vf(adapter);
3023                 mutex_unlock(&adapter->client_lock);
3024                 mutex_unlock(&adapter->crit_lock);
3025                 if (netif_running(netdev)) {
3026                         rtnl_lock();
3027                         dev_close(netdev);
3028                         rtnl_unlock();
3029                 }
3030                 return; /* Do not attempt to reinit. It's dead, Jim. */
3031         }
3032
3033 continue_reset:
3034         /* We don't use netif_running() because it may be true prior to
3035          * ndo_open() returning, so we can't assume it means all our open
3036          * tasks have finished, since we're not holding the rtnl_lock here.
3037          */
3038         running = adapter->state == __IAVF_RUNNING;
3039
3040         if (running) {
3041                 netif_carrier_off(netdev);
3042                 netif_tx_stop_all_queues(netdev);
3043                 adapter->link_up = false;
3044                 iavf_napi_disable_all(adapter);
3045         }
3046         iavf_irq_disable(adapter);
3047
3048         iavf_change_state(adapter, __IAVF_RESETTING);
3049         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3050
3051         /* free the Tx/Rx rings and descriptors; it might be better to
3052          * just reuse them sometime in the future
3053          */
3054         iavf_free_all_rx_resources(adapter);
3055         iavf_free_all_tx_resources(adapter);
3056
3057         adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3058         /* kill and reinit the admin queue */
3059         iavf_shutdown_adminq(hw);
3060         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3061         status = iavf_init_adminq(hw);
3062         if (status) {
3063                 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3064                          status);
3065                 goto reset_err;
3066         }
3067         adapter->aq_required = 0;
3068
3069         if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3070             (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3071                 err = iavf_reinit_interrupt_scheme(adapter);
3072                 if (err)
3073                         goto reset_err;
3074         }
3075
3076         if (RSS_AQ(adapter)) {
3077                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3078         } else {
3079                 err = iavf_init_rss(adapter);
3080                 if (err)
3081                         goto reset_err;
3082         }
3083
3084         adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3085         /* Always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3086          * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here.
3087          * However, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3088          * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3089          * been successfully sent and negotiated.
3090          */
3091         adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3092         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3093
3094         spin_lock_bh(&adapter->mac_vlan_list_lock);
3095
3096         /* Delete the filter for the current MAC address; it could have
3097          * been changed by the PF via administratively set MAC.
3098          * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3099          */
3100         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3101                 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3102                         list_del(&f->list);
3103                         kfree(f);
3104                 }
3105         }
3106         /* re-add all MAC filters */
3107         list_for_each_entry(f, &adapter->mac_filter_list, list) {
3108                 f->add = true;
3109         }
3110         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3111
3112         /* check if TCs are running and re-add all cloud filters */
3113         spin_lock_bh(&adapter->cloud_filter_list_lock);
3114         if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3115             adapter->num_tc) {
3116                 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3117                         cf->add = true;
3118                 }
3119         }
3120         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3121
3122         adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3123         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3124         iavf_misc_irq_enable(adapter);
3125
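             /* Forget which CTAG/STAG VLANs we believe are currently
              * programmed; that hardware state was lost in the reset.
              */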
3126         bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
3127         bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
3128
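             /* Schedule the watchdog almost immediately (2 jiffies) so the AQ
              * requests queued above are sent without waiting for the normal
              * watchdog interval.
              */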
3129         mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3130
3131         /* If we were running when the reset started, we need to restore
3132          * some state here.
3133          */
3134         if (running) {
3135                 /* allocate transmit descriptors */
3136                 err = iavf_setup_all_tx_resources(adapter);
3137                 if (err)
3138                         goto reset_err;
3139
3140                 /* allocate receive descriptors */
3141                 err = iavf_setup_all_rx_resources(adapter);
3142                 if (err)
3143                         goto reset_err;
3144
3145                 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3146                     (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3147                         err = iavf_request_traffic_irqs(adapter, netdev->name);
3148                         if (err)
3149                                 goto reset_err;
3150
3151                         adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3152                 }
3153
3154                 iavf_configure(adapter);
3155
3156                 /* iavf_up_complete() will switch device back
3157                  * to __IAVF_RUNNING
3158                  */
3159                 iavf_up_complete(adapter);
3160
3161                 iavf_irq_enable(adapter, true);
3162         } else {
3163                 iavf_change_state(adapter, __IAVF_DOWN);
3164                 wake_up(&adapter->down_waitqueue);
3165         }
3166
3167         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3168
3169         mutex_unlock(&adapter->client_lock);
3170         mutex_unlock(&adapter->crit_lock);
3171
3172         goto reset_finish;
3173 reset_err:
3174         if (running) {
3175                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3176                 iavf_free_traffic_irqs(adapter);
3177         }
3178         iavf_disable_vf(adapter);
3179
3180         mutex_unlock(&adapter->client_lock);
3181         mutex_unlock(&adapter->crit_lock);
3182
3183         if (netif_running(netdev)) {
3184                 /* Close device to ensure that Tx queues will not be started
3185                  * during netif_device_attach() at the end of the reset task.
3186                  */
3187                 rtnl_lock();
3188                 dev_close(netdev);
3189                 rtnl_unlock();
3190         }
3191
3192         dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3193 reset_finish:
3194         rtnl_lock();
3195         netif_device_attach(netdev);
3196         rtnl_unlock();
3197 }
3198
3199 /**
3200  * iavf_adminq_task - worker thread to clean the admin queue
3201  * @work: pointer to work_struct containing our data
3202  **/
3203 static void iavf_adminq_task(struct work_struct *work)
3204 {
3205         struct iavf_adapter *adapter =
3206                 container_of(work, struct iavf_adapter, adminq_task);
3207         struct iavf_hw *hw = &adapter->hw;
3208         struct iavf_arq_event_info event;
3209         enum virtchnl_ops v_op;
3210         enum iavf_status ret, v_ret;
3211         u32 val, oldval;
3212         u16 pending;
3213
3214         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3215                 goto out;
3216
3217         if (!mutex_trylock(&adapter->crit_lock)) {
3218                 if (adapter->state == __IAVF_REMOVE)
3219                         return;
3220
3221                 queue_work(adapter->wq, &adapter->adminq_task);
3222                 goto out;
3223         }
3224
3225         event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3226         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3227         if (!event.msg_buf) {
                     /* don't leak crit_lock if the event buffer can't be allocated */
                     mutex_unlock(&adapter->crit_lock);
3228                 goto out;
             }
3229
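             /* Drain all pending events from the admin receive queue and hand
              * each one to the virtchnl completion handler.
              */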
3230         do {
3231                 ret = iavf_clean_arq_element(hw, &event, &pending);
3232                 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3233                 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3234
3235                 if (ret || !v_op)
3236                         break; /* No event to process or error cleaning ARQ */
3237
3238                 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3239                                          event.msg_len);
3240                 if (pending != 0)
3241                         memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3242         } while (pending);
3243         mutex_unlock(&adapter->crit_lock);
3244
3245         if ((adapter->flags &
3246              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3247             adapter->state == __IAVF_RESETTING)
3248                 goto freedom;
3249
3250         /* check for error indications */
3251         val = rd32(hw, hw->aq.arq.len);
3252         if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3253                 goto freedom;
3254         oldval = val;
3255         if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3256                 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3257                 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3258         }
3259         if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3260                 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3261                 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3262         }
3263         if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3264                 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3265                 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3266         }
3267         if (oldval != val)
3268                 wr32(hw, hw->aq.arq.len, val);
3269
3270         val = rd32(hw, hw->aq.asq.len);
3271         oldval = val;
3272         if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3273                 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3274                 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3275         }
3276         if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3277                 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3278                 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3279         }
3280         if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3281                 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3282                 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3283         }
3284         if (oldval != val)
3285                 wr32(hw, hw->aq.asq.len, val);
3286
3287 freedom:
3288         kfree(event.msg_buf);
3289 out:
3290         /* re-enable Admin queue interrupt cause */
3291         iavf_misc_irq_enable(adapter);
3292 }
3293
3294 /**
3295  * iavf_client_task - worker thread to perform client work
3296  * @work: pointer to work_struct containing our data
3297  *
3298  * This task handles client interactions. Because client calls can be
3299  * reentrant, we can't handle them in the watchdog.
3300  **/
3301 static void iavf_client_task(struct work_struct *work)
3302 {
3303         struct iavf_adapter *adapter =
3304                 container_of(work, struct iavf_adapter, client_task.work);
3305
3306         /* If we can't get the client lock, just give up. We'll be rescheduled
3307          * later.
3308          */
3309
3310         if (!mutex_trylock(&adapter->client_lock))
3311                 return;
3312
3313         if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3314                 iavf_client_subtask(adapter);
3315                 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3316                 goto out;
3317         }
3318         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3319                 iavf_notify_client_l2_params(&adapter->vsi);
3320                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3321                 goto out;
3322         }
3323         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3324                 iavf_notify_client_close(&adapter->vsi, false);
3325                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3326                 goto out;
3327         }
3328         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3329                 iavf_notify_client_open(&adapter->vsi);
3330                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3331         }
3332 out:
3333         mutex_unlock(&adapter->client_lock);
3334 }
3335
3336 /**
3337  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3338  * @adapter: board private structure
3339  *
3340  * Free all transmit software resources
3341  **/
3342 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3343 {
3344         int i;
3345
3346         if (!adapter->tx_rings)
3347                 return;
3348
3349         for (i = 0; i < adapter->num_active_queues; i++)
3350                 if (adapter->tx_rings[i].desc)
3351                         iavf_free_tx_resources(&adapter->tx_rings[i]);
3352 }
3353
3354 /**
3355  * iavf_setup_all_tx_resources - allocate all queues Tx resources
3356  * @adapter: board private structure
3357  *
3358  * If this function returns with an error, then it's possible one or
3359  * more of the rings is populated (while the rest are not).  It is the
3360  * caller's duty to clean those orphaned rings.
3361  *
3362  * Return 0 on success, negative on failure
3363  **/
3364 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3365 {
3366         int i, err = 0;
3367
3368         for (i = 0; i < adapter->num_active_queues; i++) {
3369                 adapter->tx_rings[i].count = adapter->tx_desc_count;
3370                 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3371                 if (!err)
3372                         continue;
3373                 dev_err(&adapter->pdev->dev,
3374                         "Allocation for Tx Queue %u failed\n", i);
3375                 break;
3376         }
3377
3378         return err;
3379 }
3380
3381 /**
3382  * iavf_setup_all_rx_resources - allocate all queues Rx resources
3383  * @adapter: board private structure
3384  *
3385  * If this function returns with an error, then it's possible one or
3386  * more of the rings is populated (while the rest are not).  It is the
3387  * caller's duty to clean those orphaned rings.
3388  *
3389  * Return 0 on success, negative on failure
3390  **/
3391 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3392 {
3393         int i, err = 0;
3394
3395         for (i = 0; i < adapter->num_active_queues; i++) {
3396                 adapter->rx_rings[i].count = adapter->rx_desc_count;
3397                 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3398                 if (!err)
3399                         continue;
3400                 dev_err(&adapter->pdev->dev,
3401                         "Allocation for Rx Queue %u failed\n", i);
3402                 break;
3403         }
3404         return err;
3405 }
3406
3407 /**
3408  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3409  * @adapter: board private structure
3410  *
3411  * Free all receive software resources
3412  **/
3413 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3414 {
3415         int i;
3416
3417         if (!adapter->rx_rings)
3418                 return;
3419
3420         for (i = 0; i < adapter->num_active_queues; i++)
3421                 if (adapter->rx_rings[i].desc)
3422                         iavf_free_rx_resources(&adapter->rx_rings[i]);
3423 }
3424
3425 /**
3426  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3427  * @adapter: board private structure
3428  * @max_tx_rate: max Tx bw for a tc
3429  **/
3430 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3431                                       u64 max_tx_rate)
3432 {
3433         int speed = 0, ret = 0;
3434
3435         if (ADV_LINK_SUPPORT(adapter)) {
3436                 if (adapter->link_speed_mbps < U32_MAX) {
3437                         speed = adapter->link_speed_mbps;
3438                         goto validate_bw;
3439                 } else {
3440                         dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3441                         return -EINVAL;
3442                 }
3443         }
3444
3445         switch (adapter->link_speed) {
3446         case VIRTCHNL_LINK_SPEED_40GB:
3447                 speed = SPEED_40000;
3448                 break;
3449         case VIRTCHNL_LINK_SPEED_25GB:
3450                 speed = SPEED_25000;
3451                 break;
3452         case VIRTCHNL_LINK_SPEED_20GB:
3453                 speed = SPEED_20000;
3454                 break;
3455         case VIRTCHNL_LINK_SPEED_10GB:
3456                 speed = SPEED_10000;
3457                 break;
3458         case VIRTCHNL_LINK_SPEED_5GB:
3459                 speed = SPEED_5000;
3460                 break;
3461         case VIRTCHNL_LINK_SPEED_2_5GB:
3462                 speed = SPEED_2500;
3463                 break;
3464         case VIRTCHNL_LINK_SPEED_1GB:
3465                 speed = SPEED_1000;
3466                 break;
3467         case VIRTCHNL_LINK_SPEED_100MB:
3468                 speed = SPEED_100;
3469                 break;
3470         default:
3471                 break;
3472         }
3473
3474 validate_bw:
3475         if (max_tx_rate > speed) {
3476                 dev_err(&adapter->pdev->dev,
3477                         "Invalid tx rate specified\n");
3478                 ret = -EINVAL;
3479         }
3480
3481         return ret;
3482 }
3483
3484 /**
3485  * iavf_validate_ch_config - validate queue mapping info
3486  * @adapter: board private structure
3487  * @mqprio_qopt: queue parameters
3488  *
3489  * This function validates if the config provided by the user to
3490  * configure queue channels is valid or not. Returns 0 on a valid
3491  * config.
3492  **/
3493 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3494                                    struct tc_mqprio_qopt_offload *mqprio_qopt)
3495 {
3496         u64 total_max_rate = 0;
3497         u32 tx_rate_rem = 0;
3498         int i, num_qps = 0;
3499         u64 tx_rate = 0;
3500         int ret = 0;
3501
3502         if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3503             mqprio_qopt->qopt.num_tc < 1)
3504                 return -EINVAL;
3505
3506         for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
3507                 if (!mqprio_qopt->qopt.count[i] ||
3508                     mqprio_qopt->qopt.offset[i] != num_qps)
3509                         return -EINVAL;
3510                 if (mqprio_qopt->min_rate[i]) {
3511                         dev_err(&adapter->pdev->dev,
3512                                 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3513                                 i);
3514                         return -EINVAL;
3515                 }
3516
3517                 /* convert to Mbps */
3518                 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3519                                   IAVF_MBPS_DIVISOR);
3520
3521                 if (mqprio_qopt->max_rate[i] &&
3522                     tx_rate < IAVF_MBPS_QUANTA) {
3523                         dev_err(&adapter->pdev->dev,
3524                                 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3525                                 i, IAVF_MBPS_QUANTA);
3526                         return -EINVAL;
3527                 }
3528
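                     /* The max rate must also be an even multiple of the
                      * quanta; reject anything with a remainder.
                      */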
3529                 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3530
3531                 if (tx_rate_rem != 0) {
3532                         dev_err(&adapter->pdev->dev,
3533                                 "Invalid max tx rate for TC%d, not divisible by %d\n",
3534                                 i, IAVF_MBPS_QUANTA);
3535                         return -EINVAL;
3536                 }
3537
3538                 total_max_rate += tx_rate;
3539                 num_qps += mqprio_qopt->qopt.count[i];
3540         }
3541         if (num_qps > adapter->num_active_queues) {
3542                 dev_err(&adapter->pdev->dev,
3543                         "Cannot support requested number of queues\n");
3544                 return -EINVAL;
3545         }
3546
3547         ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3548         return ret;
3549 }
3550
3551 /**
3552  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3553  * @adapter: board private structure
3554  **/
3555 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3556 {
3557         struct iavf_cloud_filter *cf, *cftmp;
3558
3559         spin_lock_bh(&adapter->cloud_filter_list_lock);
3560         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3561                                  list) {
3562                 list_del(&cf->list);
3563                 kfree(cf);
3564                 adapter->num_cloud_filters--;
3565         }
3566         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3567 }
3568
3569 /**
3570  * __iavf_setup_tc - configure multiple traffic classes
3571  * @netdev: network interface device structure
3572  * @type_data: tc offload data
3573  *
3574  * This function processes the config information provided by the
3575  * user to configure traffic classes/queue channels and packages the
3576  * information to request the PF to setup traffic classes.
3577  *
3578  * Returns 0 on success.
3579  **/
3580 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3581 {
3582         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3583         struct iavf_adapter *adapter = netdev_priv(netdev);
3584         struct virtchnl_vf_resource *vfres = adapter->vf_res;
3585         u8 num_tc = 0, total_qps = 0;
3586         int ret = 0, netdev_tc = 0;
3587         u64 max_tx_rate;
3588         u16 mode;
3589         int i;
3590
3591         num_tc = mqprio_qopt->qopt.num_tc;
3592         mode = mqprio_qopt->mode;
3593
3594         /* delete queue_channel */
3595         if (!mqprio_qopt->qopt.hw) {
3596                 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3597                         /* reset the tc configuration */
3598                         netdev_reset_tc(netdev);
3599                         adapter->num_tc = 0;
3600                         netif_tx_stop_all_queues(netdev);
3601                         netif_tx_disable(netdev);
3602                         iavf_del_all_cloud_filters(adapter);
3603                         adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3604                         total_qps = adapter->orig_num_active_queues;
3605                         goto exit;
3606                 } else {
3607                         return -EINVAL;
3608                 }
3609         }
3610
3611         /* add queue channel */
3612         if (mode == TC_MQPRIO_MODE_CHANNEL) {
3613                 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3614                         dev_err(&adapter->pdev->dev, "ADq not supported\n");
3615                         return -EOPNOTSUPP;
3616                 }
3617                 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3618                         dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3619                         return -EINVAL;
3620                 }
3621
3622                 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3623                 if (ret)
3624                         return ret;
3625                 /* Return if same TC config is requested */
3626                 if (adapter->num_tc == num_tc)
3627                         return 0;
3628                 adapter->num_tc = num_tc;
3629
3630                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3631                         if (i < num_tc) {
3632                                 adapter->ch_config.ch_info[i].count =
3633                                         mqprio_qopt->qopt.count[i];
3634                                 adapter->ch_config.ch_info[i].offset =
3635                                         mqprio_qopt->qopt.offset[i];
3636                                 total_qps += mqprio_qopt->qopt.count[i];
3637                                 max_tx_rate = mqprio_qopt->max_rate[i];
3638                                 /* convert to Mbps */
3639                                 max_tx_rate = div_u64(max_tx_rate,
3640                                                       IAVF_MBPS_DIVISOR);
3641                                 adapter->ch_config.ch_info[i].max_tx_rate =
3642                                         max_tx_rate;
3643                         } else {
3644                                 adapter->ch_config.ch_info[i].count = 1;
3645                                 adapter->ch_config.ch_info[i].offset = 0;
3646                         }
3647                 }
3648
3649                 /* Take a snapshot of the original config, such as
3650                  * "num_active_queues". It is used later when the delete
3651                  * ADQ flow is exercised, so that once that flow completes
3652                  * the VF goes back to its original queue configuration.
3653                  */
3654
3655                 adapter->orig_num_active_queues = adapter->num_active_queues;
3656
3657                 /* Store queue info based on TC so that the VF gets
3658                  * configured with the correct number of queues when it
3659                  * completes the ADQ config flow.
3660                  */
3661                 adapter->ch_config.total_qps = total_qps;
3662
3663                 netif_tx_stop_all_queues(netdev);
3664                 netif_tx_disable(netdev);
3665                 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3666                 netdev_reset_tc(netdev);
3667                 /* Report the tc mapping up the stack */
3668                 netdev_set_num_tc(adapter->netdev, num_tc);
3669                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3670                         u16 qcount = mqprio_qopt->qopt.count[i];
3671                         u16 qoffset = mqprio_qopt->qopt.offset[i];
3672
3673                         if (i < num_tc)
3674                                 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3675                                                     qoffset);
3676                 }
3677         }
3678 exit:
3679         if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3680                 return 0;
3681
3682         netif_set_real_num_rx_queues(netdev, total_qps);
3683         netif_set_real_num_tx_queues(netdev, total_qps);
3684
3685         return ret;
3686 }
3687
3688 /**
3689  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3690  * @adapter: board private structure
3691  * @f: pointer to struct flow_cls_offload
3692  * @filter: pointer to cloud filter structure
3693  */
3694 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3695                                  struct flow_cls_offload *f,
3696                                  struct iavf_cloud_filter *filter)
3697 {
3698         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3699         struct flow_dissector *dissector = rule->match.dissector;
3700         u16 n_proto_mask = 0;
3701         u16 n_proto_key = 0;
3702         u8 field_flags = 0;
3703         u16 addr_type = 0;
3704         u16 n_proto = 0;
3705         int i = 0;
3706         struct virtchnl_filter *vf = &filter->f;
3707
3708         if (dissector->used_keys &
3709             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3710               BIT(FLOW_DISSECTOR_KEY_BASIC) |
3711               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3712               BIT(FLOW_DISSECTOR_KEY_VLAN) |
3713               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3714               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3715               BIT(FLOW_DISSECTOR_KEY_PORTS) |
3716               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3717                 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3718                         dissector->used_keys);
3719                 return -EOPNOTSUPP;
3720         }
3721
3722         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3723                 struct flow_match_enc_keyid match;
3724
3725                 flow_rule_match_enc_keyid(rule, &match);
3726                 if (match.mask->keyid != 0)
3727                         field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3728         }
3729
3730         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3731                 struct flow_match_basic match;
3732
3733                 flow_rule_match_basic(rule, &match);
3734                 n_proto_key = ntohs(match.key->n_proto);
3735                 n_proto_mask = ntohs(match.mask->n_proto);
3736
3737                 if (n_proto_key == ETH_P_ALL) {
3738                         n_proto_key = 0;
3739                         n_proto_mask = 0;
3740                 }
3741                 n_proto = n_proto_key & n_proto_mask;
3742                 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3743                         return -EINVAL;
3744                 if (n_proto == ETH_P_IPV6) {
3745                         /* specify flow type as TCP IPv6 */
3746                         vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3747                 }
3748
3749                 if (match.key->ip_proto != IPPROTO_TCP) {
3750                         dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3751                         return -EINVAL;
3752                 }
3753         }
3754
3755         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3756                 struct flow_match_eth_addrs match;
3757
3758                 flow_rule_match_eth_addrs(rule, &match);
3759
3760                 /* use is_broadcast and is_zero to check for all 0xf or 0 */
3761                 if (!is_zero_ether_addr(match.mask->dst)) {
3762                         if (is_broadcast_ether_addr(match.mask->dst)) {
3763                                 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3764                         } else {
3765                                 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3766                                         match.mask->dst);
3767                                 return -EINVAL;
3768                         }
3769                 }
3770
3771                 if (!is_zero_ether_addr(match.mask->src)) {
3772                         if (is_broadcast_ether_addr(match.mask->src)) {
3773                                 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3774                         } else {
3775                                 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3776                                         match.mask->src);
3777                                 return -EINVAL;
3778                         }
3779                 }
3780
3781                 if (!is_zero_ether_addr(match.key->dst))
3782                         if (is_valid_ether_addr(match.key->dst) ||
3783                             is_multicast_ether_addr(match.key->dst)) {
3784                                 /* set the mask if a valid dst_mac address */
3785                                 for (i = 0; i < ETH_ALEN; i++)
3786                                         vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3787                                 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3788                                                 match.key->dst);
3789                         }
3790
3791                 if (!is_zero_ether_addr(match.key->src))
3792                         if (is_valid_ether_addr(match.key->src) ||
3793                             is_multicast_ether_addr(match.key->src)) {
3794                                 /* set the mask if a valid src_mac address */
3795                                 for (i = 0; i < ETH_ALEN; i++)
3796                                         vf->mask.tcp_spec.src_mac[i] |= 0xff;
3797                                 ether_addr_copy(vf->data.tcp_spec.src_mac,
3798                                                 match.key->src);
3799                         }
3800         }
3801
3802         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3803                 struct flow_match_vlan match;
3804
3805                 flow_rule_match_vlan(rule, &match);
3806                 if (match.mask->vlan_id) {
3807                         if (match.mask->vlan_id == VLAN_VID_MASK) {
3808                                 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3809                         } else {
3810                                 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3811                                         match.mask->vlan_id);
3812                                 return -EINVAL;
3813                         }
3814                 }
3815                 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3816                 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3817         }
3818
3819         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3820                 struct flow_match_control match;
3821
3822                 flow_rule_match_control(rule, &match);
3823                 addr_type = match.key->addr_type;
3824         }
3825
3826         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3827                 struct flow_match_ipv4_addrs match;
3828
3829                 flow_rule_match_ipv4_addrs(rule, &match);
3830                 if (match.mask->dst) {
3831                         if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3832                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3833                         } else {
3834                                 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3835                                         be32_to_cpu(match.mask->dst));
3836                                 return -EINVAL;
3837                         }
3838                 }
3839
3840                 if (match.mask->src) {
3841                         if (match.mask->src == cpu_to_be32(0xffffffff)) {
3842                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3843                         } else {
3844                                 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3845                                         be32_to_cpu(match.mask->src));
3846                                 return -EINVAL;
3847                         }
3848                 }
3849
3850                 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3851                         dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3852                         return -EINVAL;
3853                 }
3854                 if (match.key->dst) {
3855                         vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3856                         vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3857                 }
3858                 if (match.key->src) {
3859                         vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3860                         vf->data.tcp_spec.src_ip[0] = match.key->src;
3861                 }
3862         }
3863
3864         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3865                 struct flow_match_ipv6_addrs match;
3866
3867                 flow_rule_match_ipv6_addrs(rule, &match);
3868
3869                 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3870                 if (ipv6_addr_any(&match.mask->dst)) {
3871                         dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3872                                 IPV6_ADDR_ANY);
3873                         return -EINVAL;
3874                 }
3875
3876                 /* src and dest IPv6 addresses should not be LOOPBACK
3877                  * (0:0:0:0:0:0:0:1), which can be represented as ::1
3878                  */
3879                 if (ipv6_addr_loopback(&match.key->dst) ||
3880                     ipv6_addr_loopback(&match.key->src)) {
3881                         dev_err(&adapter->pdev->dev,
3882                                 "ipv6 addr should not be loopback\n");
3883                         return -EINVAL;
3884                 }
3885                 if (!ipv6_addr_any(&match.mask->dst) ||
3886                     !ipv6_addr_any(&match.mask->src))
3887                         field_flags |= IAVF_CLOUD_FIELD_IIP;
3888
3889                 for (i = 0; i < 4; i++)
3890                         vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3891                 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3892                        sizeof(vf->data.tcp_spec.dst_ip));
3893                 for (i = 0; i < 4; i++)
3894                         vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3895                 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3896                        sizeof(vf->data.tcp_spec.src_ip));
3897         }
3898         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3899                 struct flow_match_ports match;
3900
3901                 flow_rule_match_ports(rule, &match);
3902                 if (match.mask->src) {
3903                         if (match.mask->src == cpu_to_be16(0xffff)) {
3904                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3905                         } else {
3906                                 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3907                                         be16_to_cpu(match.mask->src));
3908                                 return -EINVAL;
3909                         }
3910                 }
3911
3912                 if (match.mask->dst) {
3913                         if (match.mask->dst == cpu_to_be16(0xffff)) {
3914                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
3915                         } else {
3916                                 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3917                                         be16_to_cpu(match.mask->dst));
3918                                 return -EINVAL;
3919                         }
3920                 }
3921                 if (match.key->dst) {
3922                         vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3923                         vf->data.tcp_spec.dst_port = match.key->dst;
3924                 }
3925
3926                 if (match.key->src) {
3927                         vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3928                         vf->data.tcp_spec.src_port = match.key->src;
3929                 }
3930         }
3931         vf->field_flags = field_flags;
3932
3933         return 0;
3934 }
3935
3936 /**
3937  * iavf_handle_tclass - Forward to a traffic class on the device
3938  * @adapter: board private structure
3939  * @tc: traffic class index on the device
3940  * @filter: pointer to cloud filter structure
3941  */
3942 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3943                               struct iavf_cloud_filter *filter)
3944 {
3945         if (tc == 0)
3946                 return 0;
3947         if (tc < adapter->num_tc) {
3948                 if (!filter->f.data.tcp_spec.dst_port) {
3949                         dev_err(&adapter->pdev->dev,
3950                                 "Specify destination port to redirect to traffic class other than TC0\n");
3951                         return -EINVAL;
3952                 }
3953         }
3954         /* redirect to a traffic class on the same device */
3955         filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3956         filter->f.action_meta = tc;
3957         return 0;
3958 }
3959
3960 /**
3961  * iavf_find_cf - Find the cloud filter in the list
3962  * @adapter: Board private structure
3963  * @cookie: filter specific cookie
3964  *
3965  * Returns ptr to the filter object or NULL. Must be called while holding the
3966  * cloud_filter_list_lock.
3967  */
3968 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3969                                               unsigned long *cookie)
3970 {
3971         struct iavf_cloud_filter *filter = NULL;
3972
3973         if (!cookie)
3974                 return NULL;
3975
3976         list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3977                 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3978                         return filter;
3979         }
3980         return NULL;
3981 }
3982
3983 /**
3984  * iavf_configure_clsflower - Add tc flower filters
3985  * @adapter: board private structure
3986  * @cls_flower: Pointer to struct flow_cls_offload
3987  */
3988 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3989                                     struct flow_cls_offload *cls_flower)
3990 {
3991         int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3992         struct iavf_cloud_filter *filter = NULL;
3993         int err = -EINVAL, count = 50;
3994
3995         if (tc < 0) {
3996                 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3997                 return -EINVAL;
3998         }
3999
4000         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4001         if (!filter)
4002                 return -ENOMEM;
4003
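             /* Busy-poll briefly for the critical lock; give up with the
              * default error after ~50 attempts so the tc callback isn't
              * blocked indefinitely.
              */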
4004         while (!mutex_trylock(&adapter->crit_lock)) {
4005                 if (--count == 0) {
4006                         kfree(filter);
4007                         return err;
4008                 }
4009                 udelay(1);
4010         }
4011
4012         filter->cookie = cls_flower->cookie;
4013
4014         /* bail out here if filter already exists */
4015         spin_lock_bh(&adapter->cloud_filter_list_lock);
4016         if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4017                 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4018                 err = -EEXIST;
4019                 goto spin_unlock;
4020         }
4021         spin_unlock_bh(&adapter->cloud_filter_list_lock);
4022
4023         /* set the mask to all zeroes to begin with */
4024         memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4025         /* start out with flow type and eth type IPv4 to begin with */
4026         filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4027         err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4028         if (err)
4029                 goto err;
4030
4031         err = iavf_handle_tclass(adapter, tc, filter);
4032         if (err)
4033                 goto err;
4034
4035         /* add filter to the list */
4036         spin_lock_bh(&adapter->cloud_filter_list_lock);
4037         list_add_tail(&filter->list, &adapter->cloud_filter_list);
4038         adapter->num_cloud_filters++;
4039         filter->add = true;
4040         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4041 spin_unlock:
4042         spin_unlock_bh(&adapter->cloud_filter_list_lock);
4043 err:
4044         if (err)
4045                 kfree(filter);
4046
4047         mutex_unlock(&adapter->crit_lock);
4048         return err;
4049 }
4050
4051 /**
4052  * iavf_delete_clsflower - Remove tc flower filters
4053  * @adapter: board private structure
4054  * @cls_flower: Pointer to struct flow_cls_offload
4055  */
4056 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4057                                  struct flow_cls_offload *cls_flower)
4058 {
4059         struct iavf_cloud_filter *filter = NULL;
4060         int err = 0;
4061
4062         spin_lock_bh(&adapter->cloud_filter_list_lock);
4063         filter = iavf_find_cf(adapter, &cls_flower->cookie);
4064         if (filter) {
4065                 filter->del = true;
4066                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4067         } else {
4068                 err = -EINVAL;
4069         }
4070         spin_unlock_bh(&adapter->cloud_filter_list_lock);
4071
4072         return err;
4073 }
4074
4075 /**
4076  * iavf_setup_tc_cls_flower - flower classifier offloads
4077  * @adapter: board private structure
4078  * @cls_flower: pointer to flow_cls_offload struct with flow info
4079  */
4080 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4081                                     struct flow_cls_offload *cls_flower)
4082 {
4083         switch (cls_flower->command) {
4084         case FLOW_CLS_REPLACE:
4085                 return iavf_configure_clsflower(adapter, cls_flower);
4086         case FLOW_CLS_DESTROY:
4087                 return iavf_delete_clsflower(adapter, cls_flower);
4088         case FLOW_CLS_STATS:
4089                 return -EOPNOTSUPP;
4090         default:
4091                 return -EOPNOTSUPP;
4092         }
4093 }
4094
4095 /**
4096  * iavf_setup_tc_block_cb - block callback for tc
4097  * @type: type of offload
4098  * @type_data: offload data
4099  * @cb_priv: pointer to the iavf adapter given as the callback's private data
4100  *
4101  * This function is the block callback for traffic classes
4102  **/
4103 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4104                                   void *cb_priv)
4105 {
4106         struct iavf_adapter *adapter = cb_priv;
4107
4108         if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4109                 return -EOPNOTSUPP;
4110
4111         switch (type) {
4112         case TC_SETUP_CLSFLOWER:
4113                 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4114         default:
4115                 return -EOPNOTSUPP;
4116         }
4117 }
4118
4119 static LIST_HEAD(iavf_block_cb_list);
4120
4121 /**
4122  * iavf_setup_tc - configure multiple traffic classes
4123  * @netdev: network interface device structure
4124  * @type: type of offload
4125  * @type_data: tc offload data
4126  *
4127  * This function is the callback to ndo_setup_tc in the
4128  * netdev_ops.
4129  *
4130  * Returns 0 on success
4131  **/
4132 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4133                          void *type_data)
4134 {
4135         struct iavf_adapter *adapter = netdev_priv(netdev);
4136
4137         switch (type) {
4138         case TC_SETUP_QDISC_MQPRIO:
4139                 return __iavf_setup_tc(netdev, type_data);
4140         case TC_SETUP_BLOCK:
4141                 return flow_block_cb_setup_simple(type_data,
4142                                                   &iavf_block_cb_list,
4143                                                   iavf_setup_tc_block_cb,
4144                                                   adapter, adapter, true);
4145         default:
4146                 return -EOPNOTSUPP;
4147         }
4148 }
4149
4150 /**
4151  * iavf_open - Called when a network interface is made active
4152  * @netdev: network interface device structure
4153  *
4154  * Returns 0 on success, negative value on failure
4155  *
4156  * The open entry point is called when a network interface is made
4157  * active by the system (IFF_UP).  At this point all resources needed
4158  * for transmit and receive operations are allocated, the interrupt
4159  * handler is registered with the OS, the watchdog is started,
4160  * and the stack is notified that the interface is ready.
4161  **/
4162 static int iavf_open(struct net_device *netdev)
4163 {
4164         struct iavf_adapter *adapter = netdev_priv(netdev);
4165         int err;
4166
4167         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4168                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4169                 return -EIO;
4170         }
4171
4172         while (!mutex_trylock(&adapter->crit_lock)) {
4173                 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4174                  * is already taken and iavf_open is called from an upper
4175                  * device's notifier reacting on NETDEV_REGISTER event.
4176                  * We have to leave here to avoid dead lock.
4177                  */
4178                 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4179                         return -EBUSY;
4180
4181                 usleep_range(500, 1000);
4182         }
4183
4184         if (adapter->state != __IAVF_DOWN) {
4185                 err = -EBUSY;
4186                 goto err_unlock;
4187         }
4188
4189         if (adapter->state == __IAVF_RUNNING &&
4190             !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4191                 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4192                 err = 0;
4193                 goto err_unlock;
4194         }
4195
4196         /* allocate transmit descriptors */
4197         err = iavf_setup_all_tx_resources(adapter);
4198         if (err)
4199                 goto err_setup_tx;
4200
4201         /* allocate receive descriptors */
4202         err = iavf_setup_all_rx_resources(adapter);
4203         if (err)
4204                 goto err_setup_rx;
4205
4206         /* clear any pending interrupts, may auto mask */
4207         err = iavf_request_traffic_irqs(adapter, netdev->name);
4208         if (err)
4209                 goto err_req_irq;
4210
4211         spin_lock_bh(&adapter->mac_vlan_list_lock);
4212
4213         iavf_add_filter(adapter, adapter->hw.mac.addr);
4214
4215         spin_unlock_bh(&adapter->mac_vlan_list_lock);
4216
4217         /* Restore VLAN filters that were removed with IFF_DOWN */
4218         iavf_restore_filters(adapter);
4219
4220         iavf_configure(adapter);
4221
4222         iavf_up_complete(adapter);
4223
4224         iavf_irq_enable(adapter, true);
4225
4226         mutex_unlock(&adapter->crit_lock);
4227
4228         return 0;
4229
4230 err_req_irq:
4231         iavf_down(adapter);
4232         iavf_free_traffic_irqs(adapter);
4233 err_setup_rx:
4234         iavf_free_all_rx_resources(adapter);
4235 err_setup_tx:
4236         iavf_free_all_tx_resources(adapter);
4237 err_unlock:
4238         mutex_unlock(&adapter->crit_lock);
4239
4240         return err;
4241 }
4242
4243 /**
4244  * iavf_close - Disables a network interface
4245  * @netdev: network interface device structure
4246  *
4247  * Returns 0, this is not allowed to fail
4248  *
4249  * The close entry point is called when an interface is de-activated
4250  * by the OS.  The hardware is still under the driver's control, but
4251  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4252  * are freed, along with all transmit and receive resources.
4253  **/
4254 static int iavf_close(struct net_device *netdev)
4255 {
4256         struct iavf_adapter *adapter = netdev_priv(netdev);
4257         u64 aq_to_restore;
4258         int status;
4259
4260         mutex_lock(&adapter->crit_lock);
4261
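        /* If the adapter never made it out of init, or is already down or on
         * its way down, there is nothing to tear down here.
         */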
4262         if (adapter->state <= __IAVF_DOWN_PENDING) {
4263                 mutex_unlock(&adapter->crit_lock);
4264                 return 0;
4265         }
4266
4267         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4268         if (CLIENT_ENABLED(adapter))
4269                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4270         /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4271          * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl
4272          * deadlock with adminq_task() until iavf_close() times out. We must
4273          * send IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to
4274          * make disabling the queues possible for the VF. Give only the
4275          * necessary flags to iavf_down() and save the others to be set right
4276          * before iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has
4277          * already been sent and the adapter is in the DOWN state.
4278          */
4279         aq_to_restore = adapter->aq_required;
4280         adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4281
4282         /* Remove flags which we do not want to send after close or we want to
4283          * send before disable queues.
4284          */
4285         aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG              |
4286                            IAVF_FLAG_AQ_ENABLE_QUEUES           |
4287                            IAVF_FLAG_AQ_CONFIGURE_QUEUES        |
4288                            IAVF_FLAG_AQ_ADD_VLAN_FILTER         |
4289                            IAVF_FLAG_AQ_ADD_MAC_FILTER          |
4290                            IAVF_FLAG_AQ_ADD_CLOUD_FILTER        |
4291                            IAVF_FLAG_AQ_ADD_FDIR_FILTER         |
4292                            IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4293
4294         iavf_down(adapter);
4295         iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4296         iavf_free_traffic_irqs(adapter);
4297
4298         mutex_unlock(&adapter->crit_lock);
4299
4300         /* We explicitly don't free resources here because the hardware is
4301          * still active and can DMA into memory. Resources are cleared in
4302          * iavf_virtchnl_completion() after we get confirmation from the PF
4303          * driver that the rings have been stopped.
4304          *
4305          * Also, we wait for state to transition to __IAVF_DOWN before
4306          * returning. State change occurs in iavf_virtchnl_completion() after
4307          * VF resources are released (which occurs after the PF driver processes and
4308          * responds to admin queue commands).
4309          */
4310
4311         status = wait_event_timeout(adapter->down_waitqueue,
4312                                     adapter->state == __IAVF_DOWN,
4313                                     msecs_to_jiffies(500));
4314         if (!status)
4315                 netdev_warn(netdev, "Device resources not yet released\n");
4316
4317         mutex_lock(&adapter->crit_lock);
4318         adapter->aq_required |= aq_to_restore;
4319         mutex_unlock(&adapter->crit_lock);
4320         return 0;
4321 }
4322
4323 /**
4324  * iavf_change_mtu - Change the Maximum Transfer Unit
4325  * @netdev: network interface device structure
4326  * @new_mtu: new value for maximum frame size
4327  *
4328  * Returns 0 on success, negative on failure
4329  **/
4330 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4331 {
4332         struct iavf_adapter *adapter = netdev_priv(netdev);
4333
4334         netdev_dbg(netdev, "changing MTU from %d to %d\n",
4335                    netdev->mtu, new_mtu);
4336         netdev->mtu = new_mtu;
4337         if (CLIENT_ENABLED(adapter)) {
4338                 iavf_notify_client_l2_params(&adapter->vsi);
4339                 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4340         }
4341
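        /* Rx buffer sizing depends on the MTU, so if the interface is up,
         * a reset is scheduled to reconfigure the rings for the new value.
         */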
4342         if (netif_running(netdev)) {
4343                 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4344                 queue_work(adapter->wq, &adapter->reset_task);
4345         }
4346
4347         return 0;
4348 }
4349
4350 #define NETIF_VLAN_OFFLOAD_FEATURES     (NETIF_F_HW_VLAN_CTAG_RX | \
4351                                          NETIF_F_HW_VLAN_CTAG_TX | \
4352                                          NETIF_F_HW_VLAN_STAG_RX | \
4353                                          NETIF_F_HW_VLAN_STAG_TX)
4354
4355 /**
4356  * iavf_set_features - set the netdev feature flags
4357  * @netdev: ptr to the netdev being adjusted
4358  * @features: the feature set that the stack is suggesting
4359  * Note: expects to be called while under rtnl_lock()
4360  **/
4361 static int iavf_set_features(struct net_device *netdev,
4362                              netdev_features_t features)
4363 {
4364         struct iavf_adapter *adapter = netdev_priv(netdev);
4365
4366         /* trigger update on any VLAN feature change */
4367         if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4368             (features & NETIF_VLAN_OFFLOAD_FEATURES))
4369                 iavf_set_vlan_offload_features(adapter, netdev->features,
4370                                                features);
4371
4372         return 0;
4373 }
4374
4375 /**
4376  * iavf_features_check - Validate encapsulated packet conforms to limits
4377  * @skb: skb buff
4378  * @dev: This physical port's netdev
4379  * @features: Offload features that the stack believes apply
4380  **/
4381 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4382                                              struct net_device *dev,
4383                                              netdev_features_t features)
4384 {
4385         size_t len;
4386
4387         /* No point in doing any of this if neither checksum nor GSO is
4388          * being requested for this frame.  We can rule out both by just
4389          * checking for CHECKSUM_PARTIAL.
4390          */
4391         if (skb->ip_summed != CHECKSUM_PARTIAL)
4392                 return features;
4393
4394         /* We cannot support GSO if the MSS is going to be less than
4395          * 64 bytes.  If it is then we need to drop support for GSO.
4396          */
4397         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4398                 features &= ~NETIF_F_GSO_MASK;
4399
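        /* For each header-length check below, a nonzero len & ~(N * unit)
         * means the length is not a whole number of units or exceeds N units;
         * in either case checksum and GSO offloads are cleared for this frame.
         */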
4400         /* MACLEN can support at most 63 words */
4401         len = skb_network_header(skb) - skb->data;
4402         if (len & ~(63 * 2))
4403                 goto out_err;
4404
4405         /* IPLEN and EIPLEN can support at most 127 dwords */
4406         len = skb_transport_header(skb) - skb_network_header(skb);
4407         if (len & ~(127 * 4))
4408                 goto out_err;
4409
4410         if (skb->encapsulation) {
4411                 /* L4TUNLEN can support at most 127 words */
4412                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4413                 if (len & ~(127 * 2))
4414                         goto out_err;
4415
4416                 /* IPLEN can support at most 127 dwords */
4417                 len = skb_inner_transport_header(skb) -
4418                       skb_inner_network_header(skb);
4419                 if (len & ~(127 * 4))
4420                         goto out_err;
4421         }
4422
4423         /* No need to validate L4LEN as TCP is the only protocol with a
4424          * flexible value and we support all possible values supported
4425          * by TCP, which is at most 15 dwords
4426          */
4427
4428         return features;
4429 out_err:
4430         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4431 }
4432
4433 /**
4434  * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can be toggled on/off
4435  * @adapter: board private structure
4436  *
4437  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4438  * was negotiated, determine the VLAN features that can be toggled on and off.
4439  **/
4440 static netdev_features_t
4441 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4442 {
4443         netdev_features_t hw_features = 0;
4444
4445         if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4446                 return hw_features;
4447
4448         /* Enable VLAN features if supported */
4449         if (VLAN_ALLOWED(adapter)) {
4450                 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4451                                 NETIF_F_HW_VLAN_CTAG_RX);
4452         } else if (VLAN_V2_ALLOWED(adapter)) {
4453                 struct virtchnl_vlan_caps *vlan_v2_caps =
4454                         &adapter->vlan_v2_caps;
4455                 struct virtchnl_vlan_supported_caps *stripping_support =
4456                         &vlan_v2_caps->offloads.stripping_support;
4457                 struct virtchnl_vlan_supported_caps *insertion_support =
4458                         &vlan_v2_caps->offloads.insertion_support;
4459
4460                 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4461                     stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4462                         if (stripping_support->outer &
4463                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4464                                 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4465                         if (stripping_support->outer &
4466                             VIRTCHNL_VLAN_ETHERTYPE_88A8)
4467                                 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4468                 } else if (stripping_support->inner !=
4469                            VIRTCHNL_VLAN_UNSUPPORTED &&
4470                            stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4471                         if (stripping_support->inner &
4472                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4473                                 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4474                 }
4475
4476                 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4477                     insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4478                         if (insertion_support->outer &
4479                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4480                                 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4481                         if (insertion_support->outer &
4482                             VIRTCHNL_VLAN_ETHERTYPE_88A8)
4483                                 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4484                 } else if (insertion_support->inner &&
4485                            insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4486                         if (insertion_support->inner &
4487                             VIRTCHNL_VLAN_ETHERTYPE_8100)
4488                                 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4489                 }
4490         }
4491
4492         return hw_features;
4493 }
4494
4495 /**
4496  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
4497  * @adapter: board private structure
4498  *
4499  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4500  * was negotiated, determine the VLAN features that are enabled by default.
4501  **/
4502 static netdev_features_t
4503 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4504 {
4505         netdev_features_t features = 0;
4506
4507         if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4508                 return features;
4509
4510         if (VLAN_ALLOWED(adapter)) {
4511                 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4512                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4513         } else if (VLAN_V2_ALLOWED(adapter)) {
4514                 struct virtchnl_vlan_caps *vlan_v2_caps =
4515                         &adapter->vlan_v2_caps;
4516                 struct virtchnl_vlan_supported_caps *filtering_support =
4517                         &vlan_v2_caps->filtering.filtering_support;
4518                 struct virtchnl_vlan_supported_caps *stripping_support =
4519                         &vlan_v2_caps->offloads.stripping_support;
4520                 struct virtchnl_vlan_supported_caps *insertion_support =
4521                         &vlan_v2_caps->offloads.insertion_support;
4522                 u32 ethertype_init;
4523
4524                 /* give priority to outer stripping and don't support both outer
4525                  * and inner stripping
4526                  */
4527                 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4528                 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4529                         if (stripping_support->outer &
4530                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4531                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4532                                 features |= NETIF_F_HW_VLAN_CTAG_RX;
4533                         else if (stripping_support->outer &
4534                                  VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4535                                  ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4536                                 features |= NETIF_F_HW_VLAN_STAG_RX;
4537                 } else if (stripping_support->inner !=
4538                            VIRTCHNL_VLAN_UNSUPPORTED) {
4539                         if (stripping_support->inner &
4540                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4541                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4542                                 features |= NETIF_F_HW_VLAN_CTAG_RX;
4543                 }
4544
4545                 /* give priority to outer insertion and don't support both outer
4546                  * and inner insertion
4547                  */
4548                 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4549                         if (insertion_support->outer &
4550                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4551                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4552                                 features |= NETIF_F_HW_VLAN_CTAG_TX;
4553                         else if (insertion_support->outer &
4554                                  VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4555                                  ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4556                                 features |= NETIF_F_HW_VLAN_STAG_TX;
4557                 } else if (insertion_support->inner !=
4558                            VIRTCHNL_VLAN_UNSUPPORTED) {
4559                         if (insertion_support->inner &
4560                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4561                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4562                                 features |= NETIF_F_HW_VLAN_CTAG_TX;
4563                 }
4564
4565                 /* give priority to outer filtering and don't bother if both
4566                  * outer and inner filtering are enabled
4567                  */
4568                 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4569                 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4570                         if (filtering_support->outer &
4571                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4572                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4573                                 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4574                         if (filtering_support->outer &
4575                             VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4576                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4577                                 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4578                 } else if (filtering_support->inner !=
4579                            VIRTCHNL_VLAN_UNSUPPORTED) {
4580                         if (filtering_support->inner &
4581                             VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4582                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4583                                 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4584                         if (filtering_support->inner &
4585                             VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4586                             ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4587                                 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4588                 }
4589         }
4590
4591         return features;
4592 }
4593
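/* A requested feature bit passes this check unless it is set in @requested but
 * missing from @allowed; iavf_fix_netdev_vlan_features() clears every bit that
 * fails it.  For example, requested = CTAG_RX | STAG_RX with allowed = CTAG_RX
 * keeps CTAG_RX and drops STAG_RX.
 */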
4594 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4595         (!(((requested) & (feature_bit)) && \
4596            !((allowed) & (feature_bit))))
4597
4598 /**
4599  * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4600  * @adapter: board private structure
4601  * @requested_features: stack requested NETDEV features
4602  **/
4603 static netdev_features_t
4604 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4605                               netdev_features_t requested_features)
4606 {
4607         netdev_features_t allowed_features;
4608
4609         allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4610                 iavf_get_netdev_vlan_features(adapter);
4611
4612         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4613                                               allowed_features,
4614                                               NETIF_F_HW_VLAN_CTAG_TX))
4615                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4616
4617         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4618                                               allowed_features,
4619                                               NETIF_F_HW_VLAN_CTAG_RX))
4620                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4621
4622         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4623                                               allowed_features,
4624                                               NETIF_F_HW_VLAN_STAG_TX))
4625                 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4626         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4627                                               allowed_features,
4628                                               NETIF_F_HW_VLAN_STAG_RX))
4629                 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4630
4631         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4632                                               allowed_features,
4633                                               NETIF_F_HW_VLAN_CTAG_FILTER))
4634                 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4635
4636         if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4637                                               allowed_features,
4638                                               NETIF_F_HW_VLAN_STAG_FILTER))
4639                 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4640
4641         if ((requested_features &
4642              (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4643             (requested_features &
4644              (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4645             adapter->vlan_v2_caps.offloads.ethertype_match ==
4646             VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4647                 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4648                 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4649                                         NETIF_F_HW_VLAN_STAG_TX);
4650         }
4651
4652         return requested_features;
4653 }
4654
4655 /**
4656  * iavf_fix_features - fix up the netdev feature bits
4657  * @netdev: our net device
4658  * @features: desired feature bits
4659  *
4660  * Returns fixed-up features bits
4661  **/
4662 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4663                                            netdev_features_t features)
4664 {
4665         struct iavf_adapter *adapter = netdev_priv(netdev);
4666
4667         return iavf_fix_netdev_vlan_features(adapter, features);
4668 }
4669
4670 static const struct net_device_ops iavf_netdev_ops = {
4671         .ndo_open               = iavf_open,
4672         .ndo_stop               = iavf_close,
4673         .ndo_start_xmit         = iavf_xmit_frame,
4674         .ndo_set_rx_mode        = iavf_set_rx_mode,
4675         .ndo_validate_addr      = eth_validate_addr,
4676         .ndo_set_mac_address    = iavf_set_mac,
4677         .ndo_change_mtu         = iavf_change_mtu,
4678         .ndo_tx_timeout         = iavf_tx_timeout,
4679         .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
4680         .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
4681         .ndo_features_check     = iavf_features_check,
4682         .ndo_fix_features       = iavf_fix_features,
4683         .ndo_set_features       = iavf_set_features,
4684         .ndo_setup_tc           = iavf_setup_tc,
4685 };
4686
4687 /**
4688  * iavf_check_reset_complete - check that VF reset is complete
4689  * @hw: pointer to hw struct
4690  *
4691  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4692  **/
4693 static int iavf_check_reset_complete(struct iavf_hw *hw)
4694 {
4695         u32 rstat;
4696         int i;
4697
4698         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4699                 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4700                              IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4701                 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4702                     (rstat == VIRTCHNL_VFR_COMPLETED))
4703                         return 0;
4704                 usleep_range(10, 20);
4705         }
4706         return -EBUSY;
4707 }
4708
4709 /**
4710  * iavf_process_config - Process the config information we got from the PF
4711  * @adapter: board private structure
4712  *
4713  * Verify that we have a valid config struct, and set up our netdev features
4714  * and our VSI struct.
4715  **/
4716 int iavf_process_config(struct iavf_adapter *adapter)
4717 {
4718         struct virtchnl_vf_resource *vfres = adapter->vf_res;
4719         netdev_features_t hw_vlan_features, vlan_features;
4720         struct net_device *netdev = adapter->netdev;
4721         netdev_features_t hw_enc_features;
4722         netdev_features_t hw_features;
4723
4724         hw_enc_features = NETIF_F_SG                    |
4725                           NETIF_F_IP_CSUM               |
4726                           NETIF_F_IPV6_CSUM             |
4727                           NETIF_F_HIGHDMA               |
4728                           NETIF_F_SOFT_FEATURES         |
4729                           NETIF_F_TSO                   |
4730                           NETIF_F_TSO_ECN               |
4731                           NETIF_F_TSO6                  |
4732                           NETIF_F_SCTP_CRC              |
4733                           NETIF_F_RXHASH                |
4734                           NETIF_F_RXCSUM                |
4735                           0;
4736
4737         /* advertise to the stack only if offloads for encapsulated packets
4738          * are supported
4739          */
4740         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4741                 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
4742                                    NETIF_F_GSO_GRE              |
4743                                    NETIF_F_GSO_GRE_CSUM         |
4744                                    NETIF_F_GSO_IPXIP4           |
4745                                    NETIF_F_GSO_IPXIP6           |
4746                                    NETIF_F_GSO_UDP_TUNNEL_CSUM  |
4747                                    NETIF_F_GSO_PARTIAL          |
4748                                    0;
4749
4750                 if (!(vfres->vf_cap_flags &
4751                       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4752                         netdev->gso_partial_features |=
4753                                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4754
4755                 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4756                 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4757                 netdev->hw_enc_features |= hw_enc_features;
4758         }
4759         /* record features VLANs can make use of */
4760         netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4761
4762         /* Write features and hw_features separately to avoid polluting
4763          * with, or dropping, features that are set when we registered.
4764          */
4765         hw_features = hw_enc_features;
4766
4767         /* get HW VLAN features that can be toggled */
4768         hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4769
4770         /* Enable cloud filter if ADQ is supported */
4771         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4772                 hw_features |= NETIF_F_HW_TC;
4773         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4774                 hw_features |= NETIF_F_GSO_UDP_L4;
4775
4776         netdev->hw_features |= hw_features | hw_vlan_features;
4777         vlan_features = iavf_get_netdev_vlan_features(adapter);
4778
4779         netdev->features |= hw_features | vlan_features;
4780
4781         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4782                 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4783
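        /* IFF_UNICAST_FLT indicates that the VF can filter additional unicast
         * addresses in hardware, so the stack need not force unicast
         * promiscuous mode when secondary MAC addresses are added.
         */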
4784         netdev->priv_flags |= IFF_UNICAST_FLT;
4785
4786         /* Do not turn on offloads when they are requested to be turned off.
4787          * TSO needs a minimum of 576 bytes to work correctly.
4788          */
4789         if (netdev->wanted_features) {
4790                 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4791                     netdev->mtu < 576)
4792                         netdev->features &= ~NETIF_F_TSO;
4793                 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4794                     netdev->mtu < 576)
4795                         netdev->features &= ~NETIF_F_TSO6;
4796                 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4797                         netdev->features &= ~NETIF_F_TSO_ECN;
4798                 if (!(netdev->wanted_features & NETIF_F_GRO))
4799                         netdev->features &= ~NETIF_F_GRO;
4800                 if (!(netdev->wanted_features & NETIF_F_GSO))
4801                         netdev->features &= ~NETIF_F_GSO;
4802         }
4803
4804         return 0;
4805 }
4806
4807 /**
4808  * iavf_shutdown - Shutdown the device in preparation for a reboot
4809  * @pdev: pci device structure
4810  **/
4811 static void iavf_shutdown(struct pci_dev *pdev)
4812 {
4813         struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4814         struct net_device *netdev = adapter->netdev;
4815
4816         netif_device_detach(netdev);
4817
4818         if (netif_running(netdev))
4819                 iavf_close(netdev);
4820
4821         if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4822                 dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
4823         /* Prevent the watchdog from running. */
4824         iavf_change_state(adapter, __IAVF_REMOVE);
4825         adapter->aq_required = 0;
4826         mutex_unlock(&adapter->crit_lock);
4827
4828 #ifdef CONFIG_PM
4829         pci_save_state(pdev);
4830
4831 #endif
4832         pci_disable_device(pdev);
4833 }
4834
4835 /**
4836  * iavf_probe - Device Initialization Routine
4837  * @pdev: PCI device information struct
4838  * @ent: entry in iavf_pci_tbl
4839  *
4840  * Returns 0 on success, negative on failure
4841  *
4842  * iavf_probe initializes an adapter identified by a pci_dev structure.
4843  * The OS initialization, configuring of the adapter private structure,
4844  * and a hardware reset occur.
4845  **/
4846 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4847 {
4848         struct net_device *netdev;
4849         struct iavf_adapter *adapter = NULL;
4850         struct iavf_hw *hw = NULL;
4851         int err;
4852
4853         err = pci_enable_device(pdev);
4854         if (err)
4855                 return err;
4856
4857         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4858         if (err) {
4859                 dev_err(&pdev->dev,
4860                         "DMA configuration failed: 0x%x\n", err);
4861                 goto err_dma;
4862         }
4863
4864         err = pci_request_regions(pdev, iavf_driver_name);
4865         if (err) {
4866                 dev_err(&pdev->dev,
4867                         "pci_request_regions failed 0x%x\n", err);
4868                 goto err_pci_reg;
4869         }
4870
4871         pci_enable_pcie_error_reporting(pdev);
4872
4873         pci_set_master(pdev);
4874
4875         netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4876                                    IAVF_MAX_REQ_QUEUES);
4877         if (!netdev) {
4878                 err = -ENOMEM;
4879                 goto err_alloc_etherdev;
4880         }
4881
4882         SET_NETDEV_DEV(netdev, &pdev->dev);
4883
4884         pci_set_drvdata(pdev, netdev);
4885         adapter = netdev_priv(netdev);
4886
4887         adapter->netdev = netdev;
4888         adapter->pdev = pdev;
4889
4890         hw = &adapter->hw;
4891         hw->back = adapter;
4892
4893         adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4894                                               iavf_driver_name);
4895         if (!adapter->wq) {
4896                 err = -ENOMEM;
4897                 goto err_alloc_wq;
4898         }
4899
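        /* enable every netif message type below DEFAULT_DEBUG_LEVEL_SHIFT */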
4900         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4901         iavf_change_state(adapter, __IAVF_STARTUP);
4902
4903         /* Call save state here because it relies on the adapter struct. */
4904         pci_save_state(pdev);
4905
4906         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4907                               pci_resource_len(pdev, 0));
4908         if (!hw->hw_addr) {
4909                 err = -EIO;
4910                 goto err_ioremap;
4911         }
4912         hw->vendor_id = pdev->vendor;
4913         hw->device_id = pdev->device;
4914         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4915         hw->subsystem_vendor_id = pdev->subsystem_vendor;
4916         hw->subsystem_device_id = pdev->subsystem_device;
4917         hw->bus.device = PCI_SLOT(pdev->devfn);
4918         hw->bus.func = PCI_FUNC(pdev->devfn);
4919         hw->bus.bus_id = pdev->bus->number;
4920
4921         /* set up the locks for the AQ, do this only once in probe
4922          * and destroy them only once in remove
4923          */
4924         mutex_init(&adapter->crit_lock);
4925         mutex_init(&adapter->client_lock);
4926         mutex_init(&hw->aq.asq_mutex);
4927         mutex_init(&hw->aq.arq_mutex);
4928
4929         spin_lock_init(&adapter->mac_vlan_list_lock);
4930         spin_lock_init(&adapter->cloud_filter_list_lock);
4931         spin_lock_init(&adapter->fdir_fltr_lock);
4932         spin_lock_init(&adapter->adv_rss_lock);
4933
4934         INIT_LIST_HEAD(&adapter->mac_filter_list);
4935         INIT_LIST_HEAD(&adapter->vlan_filter_list);
4936         INIT_LIST_HEAD(&adapter->cloud_filter_list);
4937         INIT_LIST_HEAD(&adapter->fdir_list_head);
4938         INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4939
4940         INIT_WORK(&adapter->reset_task, iavf_reset_task);
4941         INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4942         INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4943         INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
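        /* Kick off the watchdog with a small per-function delay (5 ms times
         * the PCI function number, at most 35 ms) so first runs are staggered
         * across VFs that are brought up together.
         */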
4944         queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4945                            msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4946
4947         /* Setup the wait queue for indicating transition to down status */
4948         init_waitqueue_head(&adapter->down_waitqueue);
4949
4950         /* Setup the wait queue for indicating virtchannel events */
4951         init_waitqueue_head(&adapter->vc_waitqueue);
4952
4953         return 0;
4954
4955 err_ioremap:
4956         destroy_workqueue(adapter->wq);
4957 err_alloc_wq:
4958         free_netdev(netdev);
4959 err_alloc_etherdev:
4960         pci_disable_pcie_error_reporting(pdev);
4961         pci_release_regions(pdev);
4962 err_pci_reg:
4963 err_dma:
4964         pci_disable_device(pdev);
4965         return err;
4966 }
4967
4968 /**
4969  * iavf_suspend - Power management suspend routine
4970  * @dev_d: device info pointer
4971  *
4972  * Called when the system (VM) is entering sleep/suspend.
4973  **/
4974 static int __maybe_unused iavf_suspend(struct device *dev_d)
4975 {
4976         struct net_device *netdev = dev_get_drvdata(dev_d);
4977         struct iavf_adapter *adapter = netdev_priv(netdev);
4978
4979         netif_device_detach(netdev);
4980
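        /* The watchdog and reset tasks also take crit_lock; poll for it so
         * the device is quiesced before the interrupts are torn down.
         */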
4981         while (!mutex_trylock(&adapter->crit_lock))
4982                 usleep_range(500, 1000);
4983
4984         if (netif_running(netdev)) {
4985                 rtnl_lock();
4986                 iavf_down(adapter);
4987                 rtnl_unlock();
4988         }
4989         iavf_free_misc_irq(adapter);
4990         iavf_reset_interrupt_capability(adapter);
4991
4992         mutex_unlock(&adapter->crit_lock);
4993
4994         return 0;
4995 }
4996
4997 /**
4998  * iavf_resume - Power management resume routine
4999  * @dev_d: device info pointer
5000  *
5001  * Called when the system (VM) is resumed from sleep/suspend.
5002  **/
5003 static int __maybe_unused iavf_resume(struct device *dev_d)
5004 {
5005         struct pci_dev *pdev = to_pci_dev(dev_d);
5006         struct iavf_adapter *adapter;
5007         u32 err;
5008
5009         adapter = iavf_pdev_to_adapter(pdev);
5010
5011         pci_set_master(pdev);
5012
5013         rtnl_lock();
5014         err = iavf_set_interrupt_capability(adapter);
5015         if (err) {
5016                 rtnl_unlock();
5017                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5018                 return err;
5019         }
5020         err = iavf_request_misc_irq(adapter);
5021         rtnl_unlock();
5022         if (err) {
5023                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5024                 return err;
5025         }
5026
5027         queue_work(adapter->wq, &adapter->reset_task);
5028
5029         netif_device_attach(adapter->netdev);
5030
5031         return err;
5032 }
5033
5034 /**
5035  * iavf_remove - Device Removal Routine
5036  * @pdev: PCI device information struct
5037  *
5038  * iavf_remove is called by the PCI subsystem to alert the driver
5039  * that it should release a PCI device.  The could be caused by a
5040  * that it should release a PCI device.  This could be caused by a
5041  * memory.
5042  **/
5043 static void iavf_remove(struct pci_dev *pdev)
5044 {
5045         struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
5046         struct iavf_fdir_fltr *fdir, *fdirtmp;
5047         struct iavf_vlan_filter *vlf, *vlftmp;
5048         struct iavf_cloud_filter *cf, *cftmp;
5049         struct iavf_adv_rss *rss, *rsstmp;
5050         struct iavf_mac_filter *f, *ftmp;
5051         struct net_device *netdev;
5052         struct iavf_hw *hw;
5053         int err;
5054
5055         netdev = adapter->netdev;
5056         hw = &adapter->hw;
5057
5058         if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5059                 return;
5060
5061         /* Wait until port initialization is complete.
5062          * There are flows where register/unregister netdev may race.
5063          */
5064         while (1) {
5065                 mutex_lock(&adapter->crit_lock);
5066                 if (adapter->state == __IAVF_RUNNING ||
5067                     adapter->state == __IAVF_DOWN ||
5068                     adapter->state == __IAVF_INIT_FAILED) {
5069                         mutex_unlock(&adapter->crit_lock);
5070                         break;
5071                 }
5072
5073                 mutex_unlock(&adapter->crit_lock);
5074                 usleep_range(500, 1000);
5075         }
5076         cancel_delayed_work_sync(&adapter->watchdog_task);
5077
5078         if (adapter->netdev_registered) {
5079                 rtnl_lock();
5080                 unregister_netdevice(netdev);
5081                 adapter->netdev_registered = false;
5082                 rtnl_unlock();
5083         }
5084         if (CLIENT_ALLOWED(adapter)) {
5085                 err = iavf_lan_del_device(adapter);
5086                 if (err)
5087                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
5088                                  err);
5089         }
5090
5091         mutex_lock(&adapter->crit_lock);
5092         dev_info(&adapter->pdev->dev, "Removing device\n");
5093         iavf_change_state(adapter, __IAVF_REMOVE);
5094
5095         iavf_request_reset(adapter);
5096         msleep(50);
5097         /* If the FW isn't responding, kick it once, but only once. */
5098         if (!iavf_asq_done(hw)) {
5099                 iavf_request_reset(adapter);
5100                 msleep(50);
5101         }
5102
5103         iavf_misc_irq_disable(adapter);
5104         /* Shut down all the garbage mashers on the detention level */
5105         cancel_work_sync(&adapter->reset_task);
5106         cancel_delayed_work_sync(&adapter->watchdog_task);
5107         cancel_work_sync(&adapter->adminq_task);
5108         cancel_delayed_work_sync(&adapter->client_task);
5109
5110         adapter->aq_required = 0;
5111         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5112
5113         iavf_free_all_tx_resources(adapter);
5114         iavf_free_all_rx_resources(adapter);
5115         iavf_free_misc_irq(adapter);
5116
5117         iavf_reset_interrupt_capability(adapter);
5118         iavf_free_q_vectors(adapter);
5119
5120         iavf_free_rss(adapter);
5121
5122         if (hw->aq.asq.count)
5123                 iavf_shutdown_adminq(hw);
5124
5125         /* destroy the locks only once, here */
5126         mutex_destroy(&hw->aq.arq_mutex);
5127         mutex_destroy(&hw->aq.asq_mutex);
5128         mutex_destroy(&adapter->client_lock);
5129         mutex_unlock(&adapter->crit_lock);
5130         mutex_destroy(&adapter->crit_lock);
5131
5132         iounmap(hw->hw_addr);
5133         pci_release_regions(pdev);
5134         iavf_free_queues(adapter);
5135         kfree(adapter->vf_res);
5136         spin_lock_bh(&adapter->mac_vlan_list_lock);
5137         /* If we got removed before an up/down sequence, we've got a filter
5138          * hanging out there that we need to get rid of.
5139          */
5140         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5141                 list_del(&f->list);
5142                 kfree(f);
5143         }
5144         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5145                                  list) {
5146                 list_del(&vlf->list);
5147                 kfree(vlf);
5148         }
5149
5150         spin_unlock_bh(&adapter->mac_vlan_list_lock);
5151
5152         spin_lock_bh(&adapter->cloud_filter_list_lock);
5153         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5154                 list_del(&cf->list);
5155                 kfree(cf);
5156         }
5157         spin_unlock_bh(&adapter->cloud_filter_list_lock);
5158
5159         spin_lock_bh(&adapter->fdir_fltr_lock);
5160         list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5161                 list_del(&fdir->list);
5162                 kfree(fdir);
5163         }
5164         spin_unlock_bh(&adapter->fdir_fltr_lock);
5165
5166         spin_lock_bh(&adapter->adv_rss_lock);
5167         list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5168                                  list) {
5169                 list_del(&rss->list);
5170                 kfree(rss);
5171         }
5172         spin_unlock_bh(&adapter->adv_rss_lock);
5173
5174         destroy_workqueue(adapter->wq);
5175
5176         free_netdev(netdev);
5177
5178         pci_disable_pcie_error_reporting(pdev);
5179
5180         pci_disable_device(pdev);
5181 }
5182
5183 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5184
5185 static struct pci_driver iavf_driver = {
5186         .name      = iavf_driver_name,
5187         .id_table  = iavf_pci_tbl,
5188         .probe     = iavf_probe,
5189         .remove    = iavf_remove,
5190         .driver.pm = &iavf_pm_ops,
5191         .shutdown  = iavf_shutdown,
5192 };
5193
5194 /**
5195  * iavf_init_module - Driver Registration Routine
5196  *
5197  * iavf_init_module is the first routine called when the driver is
5198  * loaded. All it does is register with the PCI subsystem.
5199  **/
5200 static int __init iavf_init_module(void)
5201 {
5202         pr_info("iavf: %s\n", iavf_driver_string);
5203
5204         pr_info("%s\n", iavf_copyright);
5205
5206         return pci_register_driver(&iavf_driver);
5207 }
5208
5209 module_init(iavf_init_module);
5210
5211 /**
5212  * iavf_exit_module - Driver Exit Cleanup Routine
5213  *
5214  * iavf_exit_module is called just before the driver is removed
5215  * from memory.
5216  **/
5217 static void __exit iavf_exit_module(void)
5218 {
5219         pci_unregister_driver(&iavf_driver);
5220 }
5221
5222 module_exit(iavf_exit_module);
5223
5224 /* iavf_main.c */