i40e: Fix device ID define names to align to standard
[linux-2.6-block.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #ifdef CONFIG_I40E_VXLAN
30 #include <net/vxlan.h>
31 #endif
32
/* Driver identification strings, exported for ethtool and log messages */
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

/* "-k" suffix marks this as the in-kernel (as opposed to out-of-tree) build */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 30
/* Assembled dotted version string, e.g. "0.3.30-k" */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations for functions defined later in this file */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60 /* i40e_pci_tbl - PCI Device ID Table
61  *
62  * Last entry must be all 0s
63  *
64  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
65  *   Class, Class Mask, private data (not used) }
66  */
67 static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
68         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
69         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
70         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
77         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
78         /* required last entry */
79         {0, }
80 };
81 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
82
/* upper bound on SR-IOV virtual functions supported by the device */
#define I40E_MAX_VF_COUNT 128
/* -1 means "use the driver's default debug mask" */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
92
93 /**
94  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
95  * @hw:   pointer to the HW structure
96  * @mem:  ptr to mem struct to fill out
97  * @size: size of memory requested
98  * @alignment: what to align the allocation to
99  **/
100 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101                             u64 size, u32 alignment)
102 {
103         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
104
105         mem->size = ALIGN(size, alignment);
106         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
107                                       &mem->pa, GFP_KERNEL);
108         if (!mem->va)
109                 return -ENOMEM;
110
111         return 0;
112 }
113
114 /**
115  * i40e_free_dma_mem_d - OS specific memory free for shared code
116  * @hw:   pointer to the HW structure
117  * @mem:  ptr to mem struct to free
118  **/
119 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
120 {
121         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
122
123         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
124         mem->va = NULL;
125         mem->pa = 0;
126         mem->size = 0;
127
128         return 0;
129 }
130
131 /**
132  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
133  * @hw:   pointer to the HW structure
134  * @mem:  ptr to mem struct to fill out
135  * @size: size of memory requested
136  **/
137 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
138                              u32 size)
139 {
140         mem->size = size;
141         mem->va = kzalloc(size, GFP_KERNEL);
142
143         if (!mem->va)
144                 return -ENOMEM;
145
146         return 0;
147 }
148
149 /**
150  * i40e_free_virt_mem_d - OS specific memory free for shared code
151  * @hw:   pointer to the HW structure
152  * @mem:  ptr to mem struct to free
153  **/
154 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
155 {
156         /* it's ok to kfree a NULL pointer */
157         kfree(mem->va);
158         mem->va = NULL;
159         mem->size = 0;
160
161         return 0;
162 }
163
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error:
 * -EINVAL for bad parameters, -ENOMEM if no contiguous run of
 * @needed free entries exists.
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	/* id shares storage with the in-use marker bit, so it must not
	 * overlap I40E_PILE_VALID_BIT
	 */
	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			/* next allocation can start just past this lump */
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}
220
221 /**
222  * i40e_put_lump - return a lump of generic resource
223  * @pile: the pile of resource to search
224  * @index: the base item index
225  * @id: the owner id of the items assigned
226  *
227  * Returns the count of items in the lump
228  **/
229 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
230 {
231         int valid_id = (id | I40E_PILE_VALID_BIT);
232         int count = 0;
233         int i;
234
235         if (!pile || index >= pile->num_entries)
236                 return -EINVAL;
237
238         for (i = index;
239              i < pile->num_entries && pile->list[i] == valid_id;
240              i++) {
241                 pile->list[i] = 0;
242                 count++;
243         }
244
245         if (count && index < pile->search_hint)
246                 pile->search_hint = index;
247
248         return count;
249 }
250
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 * Suppressed while the device is down or a reset is in flight;
 * test_and_set_bit() on __I40E_SERVICE_SCHED guarantees at most one
 * pending schedule at a time.
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}
264
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 *
 * Each successive timeout within a 20 second window escalates the
 * recovery: VSI reinit -> PF reset -> core reset -> global reset;
 * beyond that the VSI is simply brought down.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	/* quiet for 20s?  restart the escalation from the mildest level */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			/* can't take the rtnl lock here; defer to the
			 * service task via the REINIT flags
			 */
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		/* every reset flavor has failed; give up on this VSI */
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	/* the service task performs whichever reset was requested above */
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
314
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 *
 * Publishes @val as the ring's next_to_use and writes it to the
 * hardware tail register so the NIC starts fetching the fresh
 * descriptors.
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
332
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	/* stats live inside the VSI itself; no locking here */
	return &vsi->net_stats;
}
344
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-supplied structure to fill in
 *
 * Returns the address of the device statistics structure.
 * Per-queue packet/byte counts are summed here under RCU using the
 * u64_stats seqcount retry loop; the remaining error counters are
 * copied from the VSI stats maintained by the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	/* rings may not be allocated yet during bring-up */
	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* retry until we get a consistent 64-bit snapshot */
		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* the Rx ring is allocated immediately after its Tx ring
		 * in the same block, so it can be reached by indexing
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by the service task (see i40e_update_stats) */
	stats->multicast        = vsi_stats->multicast;
	stats->tx_errors        = vsi_stats->tx_errors;
	stats->tx_dropped       = vsi_stats->tx_dropped;
	stats->rx_errors        = vsi_stats->rx_errors;
	stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
408
409 /**
410  * i40e_vsi_reset_stats - Resets all stats of the given vsi
411  * @vsi: the VSI to have its stats reset
412  **/
413 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
414 {
415         struct rtnl_link_stats64 *ns;
416         int i;
417
418         if (!vsi)
419                 return;
420
421         ns = i40e_get_vsi_stats_struct(vsi);
422         memset(ns, 0, sizeof(*ns));
423         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
424         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
425         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
426         if (vsi->rx_rings && vsi->rx_rings[0]) {
427                 for (i = 0; i < vsi->num_queue_pairs; i++) {
428                         memset(&vsi->rx_rings[i]->stats, 0 ,
429                                sizeof(vsi->rx_rings[i]->stats));
430                         memset(&vsi->rx_rings[i]->rx_stats, 0 ,
431                                sizeof(vsi->rx_rings[i]->rx_stats));
432                         memset(&vsi->tx_rings[i]->stats, 0 ,
433                                sizeof(vsi->tx_rings[i]->stats));
434                         memset(&vsi->tx_rings[i]->tx_stats, 0,
435                                sizeof(vsi->tx_rings[i]->tx_stats));
436                 }
437         }
438         vsi->stat_offsets_loaded = false;
439 }
440
441 /**
442  * i40e_pf_reset_stats - Reset all of the stats for the given pf
443  * @pf: the PF to be reset
444  **/
445 void i40e_pf_reset_stats(struct i40e_pf *pf)
446 {
447         memset(&pf->stats, 0, sizeof(pf->stats));
448         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
449         pf->stat_offsets_loaded = false;
450 }
451
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		/* on QEMU the value is assembled from two 32-bit reads;
		 * only the low 16 bits of hireg are counter bits
		 * (presumably QEMU lacks a 64-bit register read path --
		 * TODO confirm)
		 */
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		/* the 48-bit counter rolled over; add 2^48 before subtracting */
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	/* clamp to the counter's 48-bit width */
	*stat &= 0xFFFFFFFFFFFFULL;
}
486
487 /**
488  * i40e_stat_update32 - read and update a 32 bit stat from the chip
489  * @hw: ptr to the hardware info
490  * @reg: the hw reg to read
491  * @offset_loaded: has the initial offset been loaded yet
492  * @offset: ptr to current offset value
493  * @stat: ptr to the stat
494  **/
495 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
496                                bool offset_loaded, u64 *offset, u64 *stat)
497 {
498         u32 new_data;
499
500         new_data = rd32(hw, reg);
501         if (!offset_loaded)
502                 *offset = new_data;
503         if (likely(new_data >= *offset))
504                 *stat = (u32)(new_data - *offset);
505         else
506                 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
507 }
508
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 *
 * Pulls the per-VSI counters from the GLV_* register block indexed by
 * the VSI's stat counter slot.  Byte/packet counters are 48 bits wide
 * and split across high/low registers; discard/error counters are a
 * single 32-bit register each.
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	/* Rx side: bytes plus unicast/multicast/broadcast packet counts */
	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	/* Tx side: same layout as Rx above */
	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* offsets are valid from now on; subsequent calls report deltas */
	vsi->stat_offsets_loaded = true;
}
567
568 /**
569  * i40e_update_veb_stats - Update Switch component statistics
570  * @veb: the VEB being updated
571  **/
572 static void i40e_update_veb_stats(struct i40e_veb *veb)
573 {
574         struct i40e_pf *pf = veb->pf;
575         struct i40e_hw *hw = &pf->hw;
576         struct i40e_eth_stats *oes;
577         struct i40e_eth_stats *es;     /* device's eth stats */
578         int idx = 0;
579
580         idx = veb->stats_idx;
581         es = &veb->stats;
582         oes = &veb->stats_offsets;
583
584         /* Gather up the stats that the hw collects */
585         i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
586                            veb->stat_offsets_loaded,
587                            &oes->tx_discards, &es->tx_discards);
588         if (hw->revision_id > 0)
589                 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
590                                    veb->stat_offsets_loaded,
591                                    &oes->rx_unknown_protocol,
592                                    &es->rx_unknown_protocol);
593         i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
594                            veb->stat_offsets_loaded,
595                            &oes->rx_bytes, &es->rx_bytes);
596         i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
597                            veb->stat_offsets_loaded,
598                            &oes->rx_unicast, &es->rx_unicast);
599         i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
600                            veb->stat_offsets_loaded,
601                            &oes->rx_multicast, &es->rx_multicast);
602         i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
603                            veb->stat_offsets_loaded,
604                            &oes->rx_broadcast, &es->rx_broadcast);
605
606         i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
607                            veb->stat_offsets_loaded,
608                            &oes->tx_bytes, &es->tx_bytes);
609         i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
610                            veb->stat_offsets_loaded,
611                            &oes->tx_unicast, &es->tx_unicast);
612         i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
613                            veb->stat_offsets_loaded,
614                            &oes->tx_multicast, &es->tx_multicast);
615         i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
616                            veb->stat_offsets_loaded,
617                            &oes->tx_broadcast, &es->tx_broadcast);
618         veb->stat_offsets_loaded = true;
619 }
620
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode.
 * When new XOFF frames have arrived, the Tx hang-detection arm bit is
 * cleared on every ring, since a paused link legitimately stalls Tx.
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	/* only relevant when link-level flow control can pause our Tx */
	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	/* remember the previous total to detect newly received XOFFs */
	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}
661
/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode.  Falls back
 * to link-level flow control accounting when DCB/PFC is not enabled.
 * For each priority that saw new XOFF frames, the hang-detection arm
 * bit is cleared on Tx rings belonging to the paused traffic class.
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	/* read the per-priority XOFF counters and note paused TCs */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			/* only disarm rings in a paused traffic class */
			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}
719
720 /**
721  * i40e_update_stats - Update the board statistics counters.
722  * @vsi: the VSI to be updated
723  *
724  * There are a few instances where we store the same stat in a
725  * couple of different structs.  This is partly because we have
726  * the netdev stats that need to be filled out, which is slightly
727  * different from the "eth_stats" defined by the chip and used in
728  * VF communications.  We sort it all out here in a central place.
729  **/
730 void i40e_update_stats(struct i40e_vsi *vsi)
731 {
732         struct i40e_pf *pf = vsi->back;
733         struct i40e_hw *hw = &pf->hw;
734         struct rtnl_link_stats64 *ons;
735         struct rtnl_link_stats64 *ns;   /* netdev stats */
736         struct i40e_eth_stats *oes;
737         struct i40e_eth_stats *es;     /* device's eth stats */
738         u32 tx_restart, tx_busy;
739         u32 rx_page, rx_buf;
740         u64 rx_p, rx_b;
741         u64 tx_p, tx_b;
742         int i;
743         u16 q;
744
745         if (test_bit(__I40E_DOWN, &vsi->state) ||
746             test_bit(__I40E_CONFIG_BUSY, &pf->state))
747                 return;
748
749         ns = i40e_get_vsi_stats_struct(vsi);
750         ons = &vsi->net_stats_offsets;
751         es = &vsi->eth_stats;
752         oes = &vsi->eth_stats_offsets;
753
754         /* Gather up the netdev and vsi stats that the driver collects
755          * on the fly during packet processing
756          */
757         rx_b = rx_p = 0;
758         tx_b = tx_p = 0;
759         tx_restart = tx_busy = 0;
760         rx_page = 0;
761         rx_buf = 0;
762         rcu_read_lock();
763         for (q = 0; q < vsi->num_queue_pairs; q++) {
764                 struct i40e_ring *p;
765                 u64 bytes, packets;
766                 unsigned int start;
767
768                 /* locate Tx ring */
769                 p = ACCESS_ONCE(vsi->tx_rings[q]);
770
771                 do {
772                         start = u64_stats_fetch_begin_bh(&p->syncp);
773                         packets = p->stats.packets;
774                         bytes = p->stats.bytes;
775                 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
776                 tx_b += bytes;
777                 tx_p += packets;
778                 tx_restart += p->tx_stats.restart_queue;
779                 tx_busy += p->tx_stats.tx_busy;
780
781                 /* Rx queue is part of the same block as Tx queue */
782                 p = &p[1];
783                 do {
784                         start = u64_stats_fetch_begin_bh(&p->syncp);
785                         packets = p->stats.packets;
786                         bytes = p->stats.bytes;
787                 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
788                 rx_b += bytes;
789                 rx_p += packets;
790                 rx_buf += p->rx_stats.alloc_buff_failed;
791                 rx_page += p->rx_stats.alloc_page_failed;
792         }
793         rcu_read_unlock();
794         vsi->tx_restart = tx_restart;
795         vsi->tx_busy = tx_busy;
796         vsi->rx_page_failed = rx_page;
797         vsi->rx_buf_failed = rx_buf;
798
799         ns->rx_packets = rx_p;
800         ns->rx_bytes = rx_b;
801         ns->tx_packets = tx_p;
802         ns->tx_bytes = tx_b;
803
804         i40e_update_eth_stats(vsi);
805         /* update netdev stats from eth stats */
806         ons->rx_errors = oes->rx_errors;
807         ns->rx_errors = es->rx_errors;
808         ons->tx_errors = oes->tx_errors;
809         ns->tx_errors = es->tx_errors;
810         ons->multicast = oes->rx_multicast;
811         ns->multicast = es->rx_multicast;
812         ons->tx_dropped = oes->tx_discards;
813         ns->tx_dropped = es->tx_discards;
814
815         /* Get the port data only if this is the main PF VSI */
816         if (vsi == pf->vsi[pf->lan_vsi]) {
817                 struct i40e_hw_port_stats *nsd = &pf->stats;
818                 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
819
820                 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
821                                    I40E_GLPRT_GORCL(hw->port),
822                                    pf->stat_offsets_loaded,
823                                    &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
824                 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
825                                    I40E_GLPRT_GOTCL(hw->port),
826                                    pf->stat_offsets_loaded,
827                                    &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
828                 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
829                                    pf->stat_offsets_loaded,
830                                    &osd->eth.rx_discards,
831                                    &nsd->eth.rx_discards);
832                 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
833                                    pf->stat_offsets_loaded,
834                                    &osd->eth.tx_discards,
835                                    &nsd->eth.tx_discards);
836                 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
837                                    I40E_GLPRT_MPRCL(hw->port),
838                                    pf->stat_offsets_loaded,
839                                    &osd->eth.rx_multicast,
840                                    &nsd->eth.rx_multicast);
841
842                 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
843                                    pf->stat_offsets_loaded,
844                                    &osd->tx_dropped_link_down,
845                                    &nsd->tx_dropped_link_down);
846
847                 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
848                                    pf->stat_offsets_loaded,
849                                    &osd->crc_errors, &nsd->crc_errors);
850                 ns->rx_crc_errors = nsd->crc_errors;
851
852                 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
853                                    pf->stat_offsets_loaded,
854                                    &osd->illegal_bytes, &nsd->illegal_bytes);
855                 ns->rx_errors = nsd->crc_errors
856                                 + nsd->illegal_bytes;
857
858                 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
859                                    pf->stat_offsets_loaded,
860                                    &osd->mac_local_faults,
861                                    &nsd->mac_local_faults);
862                 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
863                                    pf->stat_offsets_loaded,
864                                    &osd->mac_remote_faults,
865                                    &nsd->mac_remote_faults);
866
867                 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
868                                    pf->stat_offsets_loaded,
869                                    &osd->rx_length_errors,
870                                    &nsd->rx_length_errors);
871                 ns->rx_length_errors = nsd->rx_length_errors;
872
873                 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
874                                    pf->stat_offsets_loaded,
875                                    &osd->link_xon_rx, &nsd->link_xon_rx);
876                 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
877                                    pf->stat_offsets_loaded,
878                                    &osd->link_xon_tx, &nsd->link_xon_tx);
879                 i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
880                 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
881                                    pf->stat_offsets_loaded,
882                                    &osd->link_xoff_tx, &nsd->link_xoff_tx);
883
884                 for (i = 0; i < 8; i++) {
885                         i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
886                                            pf->stat_offsets_loaded,
887                                            &osd->priority_xon_rx[i],
888                                            &nsd->priority_xon_rx[i]);
889                         i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
890                                            pf->stat_offsets_loaded,
891                                            &osd->priority_xon_tx[i],
892                                            &nsd->priority_xon_tx[i]);
893                         i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
894                                            pf->stat_offsets_loaded,
895                                            &osd->priority_xoff_tx[i],
896                                            &nsd->priority_xoff_tx[i]);
897                         i40e_stat_update32(hw,
898                                            I40E_GLPRT_RXON2OFFCNT(hw->port, i),
899                                            pf->stat_offsets_loaded,
900                                            &osd->priority_xon_2_xoff[i],
901                                            &nsd->priority_xon_2_xoff[i]);
902                 }
903
904                 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
905                                    I40E_GLPRT_PRC64L(hw->port),
906                                    pf->stat_offsets_loaded,
907                                    &osd->rx_size_64, &nsd->rx_size_64);
908                 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
909                                    I40E_GLPRT_PRC127L(hw->port),
910                                    pf->stat_offsets_loaded,
911                                    &osd->rx_size_127, &nsd->rx_size_127);
912                 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
913                                    I40E_GLPRT_PRC255L(hw->port),
914                                    pf->stat_offsets_loaded,
915                                    &osd->rx_size_255, &nsd->rx_size_255);
916                 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
917                                    I40E_GLPRT_PRC511L(hw->port),
918                                    pf->stat_offsets_loaded,
919                                    &osd->rx_size_511, &nsd->rx_size_511);
920                 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
921                                    I40E_GLPRT_PRC1023L(hw->port),
922                                    pf->stat_offsets_loaded,
923                                    &osd->rx_size_1023, &nsd->rx_size_1023);
924                 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
925                                    I40E_GLPRT_PRC1522L(hw->port),
926                                    pf->stat_offsets_loaded,
927                                    &osd->rx_size_1522, &nsd->rx_size_1522);
928                 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
929                                    I40E_GLPRT_PRC9522L(hw->port),
930                                    pf->stat_offsets_loaded,
931                                    &osd->rx_size_big, &nsd->rx_size_big);
932
933                 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
934                                    I40E_GLPRT_PTC64L(hw->port),
935                                    pf->stat_offsets_loaded,
936                                    &osd->tx_size_64, &nsd->tx_size_64);
937                 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
938                                    I40E_GLPRT_PTC127L(hw->port),
939                                    pf->stat_offsets_loaded,
940                                    &osd->tx_size_127, &nsd->tx_size_127);
941                 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
942                                    I40E_GLPRT_PTC255L(hw->port),
943                                    pf->stat_offsets_loaded,
944                                    &osd->tx_size_255, &nsd->tx_size_255);
945                 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
946                                    I40E_GLPRT_PTC511L(hw->port),
947                                    pf->stat_offsets_loaded,
948                                    &osd->tx_size_511, &nsd->tx_size_511);
949                 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
950                                    I40E_GLPRT_PTC1023L(hw->port),
951                                    pf->stat_offsets_loaded,
952                                    &osd->tx_size_1023, &nsd->tx_size_1023);
953                 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
954                                    I40E_GLPRT_PTC1522L(hw->port),
955                                    pf->stat_offsets_loaded,
956                                    &osd->tx_size_1522, &nsd->tx_size_1522);
957                 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
958                                    I40E_GLPRT_PTC9522L(hw->port),
959                                    pf->stat_offsets_loaded,
960                                    &osd->tx_size_big, &nsd->tx_size_big);
961
962                 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
963                                    pf->stat_offsets_loaded,
964                                    &osd->rx_undersize, &nsd->rx_undersize);
965                 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
966                                    pf->stat_offsets_loaded,
967                                    &osd->rx_fragments, &nsd->rx_fragments);
968                 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
969                                    pf->stat_offsets_loaded,
970                                    &osd->rx_oversize, &nsd->rx_oversize);
971                 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
972                                    pf->stat_offsets_loaded,
973                                    &osd->rx_jabber, &nsd->rx_jabber);
974         }
975
976         pf->stat_offsets_loaded = true;
977 }
978
979 /**
980  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
981  * @vsi: the VSI to be searched
982  * @macaddr: the MAC address
983  * @vlan: the vlan
984  * @is_vf: make sure its a vf filter, else doesn't matter
985  * @is_netdev: make sure its a netdev filter, else doesn't matter
986  *
987  * Returns ptr to the filter object or NULL
988  **/
989 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
990                                                 u8 *macaddr, s16 vlan,
991                                                 bool is_vf, bool is_netdev)
992 {
993         struct i40e_mac_filter *f;
994
995         if (!vsi || !macaddr)
996                 return NULL;
997
998         list_for_each_entry(f, &vsi->mac_filter_list, list) {
999                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1000                     (vlan == f->vlan)    &&
1001                     (!is_vf || f->is_vf) &&
1002                     (!is_netdev || f->is_netdev))
1003                         return f;
1004         }
1005         return NULL;
1006 }
1007
1008 /**
1009  * i40e_find_mac - Find a mac addr in the macvlan filters list
1010  * @vsi: the VSI to be searched
1011  * @macaddr: the MAC address we are searching for
1012  * @is_vf: make sure its a vf filter, else doesn't matter
1013  * @is_netdev: make sure its a netdev filter, else doesn't matter
1014  *
1015  * Returns the first filter with the provided MAC address or NULL if
1016  * MAC address was not found
1017  **/
1018 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1019                                       bool is_vf, bool is_netdev)
1020 {
1021         struct i40e_mac_filter *f;
1022
1023         if (!vsi || !macaddr)
1024                 return NULL;
1025
1026         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1027                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1028                     (!is_vf || f->is_vf) &&
1029                     (!is_netdev || f->is_netdev))
1030                         return f;
1031         }
1032         return NULL;
1033 }
1034
1035 /**
1036  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1037  * @vsi: the VSI to be searched
1038  *
1039  * Returns true if VSI is in vlan mode or false otherwise
1040  **/
1041 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1042 {
1043         struct i40e_mac_filter *f;
1044
1045         /* Only -1 for all the filters denotes not in vlan mode
1046          * so we have to go through all the list in order to make sure
1047          */
1048         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1049                 if (f->vlan >= 0)
1050                         return true;
1051         }
1052
1053         return false;
1054 }
1055
1056 /**
1057  * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1058  * @vsi: the VSI to be searched
1059  * @macaddr: the mac address to be filtered
1060  * @is_vf: true if it is a vf
1061  * @is_netdev: true if it is a netdev
1062  *
1063  * Goes through all the macvlan filters and adds a
1064  * macvlan filter for each unique vlan that already exists
1065  *
1066  * Returns first filter found on success, else NULL
1067  **/
1068 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1069                                              bool is_vf, bool is_netdev)
1070 {
1071         struct i40e_mac_filter *f;
1072
1073         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1074                 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1075                                       is_vf, is_netdev)) {
1076                         if (!i40e_add_filter(vsi, macaddr, f->vlan,
1077                                              is_vf, is_netdev))
1078                                 return NULL;
1079                 }
1080         }
1081
1082         return list_first_entry_or_null(&vsi->mac_filter_list,
1083                                         struct i40e_mac_filter, list);
1084 }
1085
1086 /**
1087  * i40e_add_filter - Add a mac/vlan filter to the VSI
1088  * @vsi: the VSI to be searched
1089  * @macaddr: the MAC address
1090  * @vlan: the vlan
1091  * @is_vf: make sure its a vf filter, else doesn't matter
1092  * @is_netdev: make sure its a netdev filter, else doesn't matter
1093  *
1094  * Returns ptr to the filter object or NULL when no memory available.
1095  **/
1096 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1097                                         u8 *macaddr, s16 vlan,
1098                                         bool is_vf, bool is_netdev)
1099 {
1100         struct i40e_mac_filter *f;
1101
1102         if (!vsi || !macaddr)
1103                 return NULL;
1104
1105         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1106         if (!f) {
1107                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1108                 if (!f)
1109                         goto add_filter_out;
1110
1111                 memcpy(f->macaddr, macaddr, ETH_ALEN);
1112                 f->vlan = vlan;
1113                 f->changed = true;
1114
1115                 INIT_LIST_HEAD(&f->list);
1116                 list_add(&f->list, &vsi->mac_filter_list);
1117         }
1118
1119         /* increment counter and add a new flag if needed */
1120         if (is_vf) {
1121                 if (!f->is_vf) {
1122                         f->is_vf = true;
1123                         f->counter++;
1124                 }
1125         } else if (is_netdev) {
1126                 if (!f->is_netdev) {
1127                         f->is_netdev = true;
1128                         f->counter++;
1129                 }
1130         } else {
1131                 f->counter++;
1132         }
1133
1134         /* changed tells sync_filters_subtask to
1135          * push the filter down to the firmware
1136          */
1137         if (f->changed) {
1138                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1139                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1140         }
1141
1142 add_filter_out:
1143         return f;
1144 }
1145
1146 /**
1147  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1148  * @vsi: the VSI to be searched
1149  * @macaddr: the MAC address
1150  * @vlan: the vlan
1151  * @is_vf: make sure it's a vf filter, else doesn't matter
1152  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1153  **/
1154 void i40e_del_filter(struct i40e_vsi *vsi,
1155                      u8 *macaddr, s16 vlan,
1156                      bool is_vf, bool is_netdev)
1157 {
1158         struct i40e_mac_filter *f;
1159
1160         if (!vsi || !macaddr)
1161                 return;
1162
1163         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1164         if (!f || f->counter == 0)
1165                 return;
1166
1167         if (is_vf) {
1168                 if (f->is_vf) {
1169                         f->is_vf = false;
1170                         f->counter--;
1171                 }
1172         } else if (is_netdev) {
1173                 if (f->is_netdev) {
1174                         f->is_netdev = false;
1175                         f->counter--;
1176                 }
1177         } else {
1178                 /* make sure we don't remove a filter in use by vf or netdev */
1179                 int min_f = 0;
1180                 min_f += (f->is_vf ? 1 : 0);
1181                 min_f += (f->is_netdev ? 1 : 0);
1182
1183                 if (f->counter > min_f)
1184                         f->counter--;
1185         }
1186
1187         /* counter == 0 tells sync_filters_subtask to
1188          * remove the filter from the firmware's list
1189          */
1190         if (f->counter == 0) {
1191                 f->changed = true;
1192                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1193                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1194         }
1195 }
1196
1197 /**
1198  * i40e_set_mac - NDO callback to set mac address
1199  * @netdev: network interface device structure
1200  * @p: pointer to an address structure
1201  *
1202  * Returns 0 on success, negative on failure
1203  **/
1204 static int i40e_set_mac(struct net_device *netdev, void *p)
1205 {
1206         struct i40e_netdev_priv *np = netdev_priv(netdev);
1207         struct i40e_vsi *vsi = np->vsi;
1208         struct sockaddr *addr = p;
1209         struct i40e_mac_filter *f;
1210
1211         if (!is_valid_ether_addr(addr->sa_data))
1212                 return -EADDRNOTAVAIL;
1213
1214         netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
1215
1216         if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
1217                 return 0;
1218
1219         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1220             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1221                 return -EADDRNOTAVAIL;
1222
1223         if (vsi->type == I40E_VSI_MAIN) {
1224                 i40e_status ret;
1225                 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1226                                                 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1227                                                 addr->sa_data, NULL);
1228                 if (ret) {
1229                         netdev_info(netdev,
1230                                     "Addr change for Main VSI failed: %d\n",
1231                                     ret);
1232                         return -EADDRNOTAVAIL;
1233                 }
1234
1235                 memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
1236         }
1237
1238         /* In order to be sure to not drop any packets, add the new address
1239          * then delete the old one.
1240          */
1241         f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
1242         if (!f)
1243                 return -ENOMEM;
1244
1245         i40e_sync_vsi_filters(vsi);
1246         i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1247         i40e_sync_vsi_filters(vsi);
1248
1249         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1250
1251         return 0;
1252 }
1253
1254 /**
1255  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1256  * @vsi: the VSI being setup
1257  * @ctxt: VSI context structure
1258  * @enabled_tc: Enabled TCs bitmap
1259  * @is_add: True if called before Add VSI
1260  *
1261  * Setup VSI queue mapping for enabled traffic classes.
1262  **/
1263 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1264                                      struct i40e_vsi_context *ctxt,
1265                                      u8 enabled_tc,
1266                                      bool is_add)
1267 {
1268         struct i40e_pf *pf = vsi->back;
1269         u16 sections = 0;
1270         u8 netdev_tc = 0;
1271         u16 numtc = 0;
1272         u16 qcount;
1273         u8 offset;
1274         u16 qmap;
1275         int i;
1276         u16 num_tc_qps = 0;
1277
1278         sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1279         offset = 0;
1280
1281         if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1282                 /* Find numtc from enabled TC bitmap */
1283                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1284                         if (enabled_tc & (1 << i)) /* TC is enabled */
1285                                 numtc++;
1286                 }
1287                 if (!numtc) {
1288                         dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1289                         numtc = 1;
1290                 }
1291         } else {
1292                 /* At least TC0 is enabled in case of non-DCB case */
1293                 numtc = 1;
1294         }
1295
1296         vsi->tc_config.numtc = numtc;
1297         vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1298         /* Number of queues per enabled TC */
1299         num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
1300         num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1301
1302         /* Setup queue offset/count for all TCs for given VSI */
1303         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1304                 /* See if the given TC is enabled for the given VSI */
1305                 if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1306                         int pow, num_qps;
1307
1308                         switch (vsi->type) {
1309                         case I40E_VSI_MAIN:
1310                                 qcount = min_t(int, pf->rss_size, num_tc_qps);
1311                                 break;
1312                         case I40E_VSI_FDIR:
1313                         case I40E_VSI_SRIOV:
1314                         case I40E_VSI_VMDQ2:
1315                         default:
1316                                 qcount = num_tc_qps;
1317                                 WARN_ON(i != 0);
1318                                 break;
1319                         }
1320                         vsi->tc_config.tc_info[i].qoffset = offset;
1321                         vsi->tc_config.tc_info[i].qcount = qcount;
1322
1323                         /* find the power-of-2 of the number of queue pairs */
1324                         num_qps = qcount;
1325                         pow = 0;
1326                         while (num_qps && ((1 << pow) < qcount)) {
1327                                 pow++;
1328                                 num_qps >>= 1;
1329                         }
1330
1331                         vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1332                         qmap =
1333                             (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1334                             (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1335
1336                         offset += qcount;
1337                 } else {
1338                         /* TC is not enabled so set the offset to
1339                          * default queue and allocate one queue
1340                          * for the given TC.
1341                          */
1342                         vsi->tc_config.tc_info[i].qoffset = 0;
1343                         vsi->tc_config.tc_info[i].qcount = 1;
1344                         vsi->tc_config.tc_info[i].netdev_tc = 0;
1345
1346                         qmap = 0;
1347                 }
1348                 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1349         }
1350
1351         /* Set actual Tx/Rx queue pairs */
1352         vsi->num_queue_pairs = offset;
1353
1354         /* Scheduler section valid can only be set for ADD VSI */
1355         if (is_add) {
1356                 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1357
1358                 ctxt->info.up_enable_bits = enabled_tc;
1359         }
1360         if (vsi->type == I40E_VSI_SRIOV) {
1361                 ctxt->info.mapping_flags |=
1362                                      cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1363                 for (i = 0; i < vsi->num_queue_pairs; i++)
1364                         ctxt->info.queue_mapping[i] =
1365                                                cpu_to_le16(vsi->base_queue + i);
1366         } else {
1367                 ctxt->info.mapping_flags |=
1368                                         cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1369                 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1370         }
1371         ctxt->info.valid_sections |= cpu_to_le16(sections);
1372 }
1373
1374 /**
1375  * i40e_set_rx_mode - NDO callback to set the netdev filters
1376  * @netdev: network interface device structure
1377  **/
1378 static void i40e_set_rx_mode(struct net_device *netdev)
1379 {
1380         struct i40e_netdev_priv *np = netdev_priv(netdev);
1381         struct i40e_mac_filter *f, *ftmp;
1382         struct i40e_vsi *vsi = np->vsi;
1383         struct netdev_hw_addr *uca;
1384         struct netdev_hw_addr *mca;
1385         struct netdev_hw_addr *ha;
1386
1387         /* add addr if not already in the filter list */
1388         netdev_for_each_uc_addr(uca, netdev) {
1389                 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1390                         if (i40e_is_vsi_in_vlan(vsi))
1391                                 i40e_put_mac_in_vlan(vsi, uca->addr,
1392                                                      false, true);
1393                         else
1394                                 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1395                                                 false, true);
1396                 }
1397         }
1398
1399         netdev_for_each_mc_addr(mca, netdev) {
1400                 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1401                         if (i40e_is_vsi_in_vlan(vsi))
1402                                 i40e_put_mac_in_vlan(vsi, mca->addr,
1403                                                      false, true);
1404                         else
1405                                 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1406                                                 false, true);
1407                 }
1408         }
1409
1410         /* remove filter if not in netdev list */
1411         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1412                 bool found = false;
1413
1414                 if (!f->is_netdev)
1415                         continue;
1416
1417                 if (is_multicast_ether_addr(f->macaddr)) {
1418                         netdev_for_each_mc_addr(mca, netdev) {
1419                                 if (ether_addr_equal(mca->addr, f->macaddr)) {
1420                                         found = true;
1421                                         break;
1422                                 }
1423                         }
1424                 } else {
1425                         netdev_for_each_uc_addr(uca, netdev) {
1426                                 if (ether_addr_equal(uca->addr, f->macaddr)) {
1427                                         found = true;
1428                                         break;
1429                                 }
1430                         }
1431
1432                         for_each_dev_addr(netdev, ha) {
1433                                 if (ether_addr_equal(ha->addr, f->macaddr)) {
1434                                         found = true;
1435                                         break;
1436                                 }
1437                         }
1438                 }
1439                 if (!found)
1440                         i40e_del_filter(
1441                            vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1442         }
1443
1444         /* check for other flag changes */
1445         if (vsi->current_netdev_flags != vsi->netdev->flags) {
1446                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1447                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1448         }
1449 }
1450
1451 /**
1452  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1453  * @vsi: ptr to the VSI
1454  *
1455  * Push any outstanding VSI filter changes through the AdminQ.
1456  *
1457  * Returns 0 or error value
1458  **/
1459 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1460 {
1461         struct i40e_mac_filter *f, *ftmp;
1462         bool promisc_forced_on = false;
1463         bool add_happened = false;
1464         int filter_list_len = 0;
1465         u32 changed_flags = 0;
1466         i40e_status aq_ret = 0;
1467         struct i40e_pf *pf;
1468         int num_add = 0;
1469         int num_del = 0;
1470         u16 cmd_flags;
1471
1472         /* empty array typed pointers, kcalloc later */
1473         struct i40e_aqc_add_macvlan_element_data *add_list;
1474         struct i40e_aqc_remove_macvlan_element_data *del_list;
1475
1476         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1477                 usleep_range(1000, 2000);
1478         pf = vsi->back;
1479
1480         if (vsi->netdev) {
1481                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1482                 vsi->current_netdev_flags = vsi->netdev->flags;
1483         }
1484
1485         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1486                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1487
1488                 filter_list_len = pf->hw.aq.asq_buf_size /
1489                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1490                 del_list = kcalloc(filter_list_len,
1491                             sizeof(struct i40e_aqc_remove_macvlan_element_data),
1492                             GFP_KERNEL);
1493                 if (!del_list)
1494                         return -ENOMEM;
1495
1496                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1497                         if (!f->changed)
1498                                 continue;
1499
1500                         if (f->counter != 0)
1501                                 continue;
1502                         f->changed = false;
1503                         cmd_flags = 0;
1504
1505                         /* add to delete list */
1506                         memcpy(del_list[num_del].mac_addr,
1507                                f->macaddr, ETH_ALEN);
1508                         del_list[num_del].vlan_tag =
1509                                 cpu_to_le16((u16)(f->vlan ==
1510                                             I40E_VLAN_ANY ? 0 : f->vlan));
1511
1512                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1513                         del_list[num_del].flags = cmd_flags;
1514                         num_del++;
1515
1516                         /* unlink from filter list */
1517                         list_del(&f->list);
1518                         kfree(f);
1519
1520                         /* flush a full buffer */
1521                         if (num_del == filter_list_len) {
1522                                 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1523                                             vsi->seid, del_list, num_del,
1524                                             NULL);
1525                                 num_del = 0;
1526                                 memset(del_list, 0, sizeof(*del_list));
1527
1528                                 if (aq_ret)
1529                                         dev_info(&pf->pdev->dev,
1530                                                  "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1531                                                  aq_ret,
1532                                                  pf->hw.aq.asq_last_status);
1533                         }
1534                 }
1535                 if (num_del) {
1536                         aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1537                                                      del_list, num_del, NULL);
1538                         num_del = 0;
1539
1540                         if (aq_ret)
1541                                 dev_info(&pf->pdev->dev,
1542                                          "ignoring delete macvlan error, err %d, aq_err %d\n",
1543                                          aq_ret, pf->hw.aq.asq_last_status);
1544                 }
1545
1546                 kfree(del_list);
1547                 del_list = NULL;
1548
1549                 /* do all the adds now */
1550                 filter_list_len = pf->hw.aq.asq_buf_size /
1551                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1552                 add_list = kcalloc(filter_list_len,
1553                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1554                                GFP_KERNEL);
1555                 if (!add_list)
1556                         return -ENOMEM;
1557
1558                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1559                         if (!f->changed)
1560                                 continue;
1561
1562                         if (f->counter == 0)
1563                                 continue;
1564                         f->changed = false;
1565                         add_happened = true;
1566                         cmd_flags = 0;
1567
1568                         /* add to add array */
1569                         memcpy(add_list[num_add].mac_addr,
1570                                f->macaddr, ETH_ALEN);
1571                         add_list[num_add].vlan_tag =
1572                                 cpu_to_le16(
1573                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1574                         add_list[num_add].queue_number = 0;
1575
1576                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1577                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
1578                         num_add++;
1579
1580                         /* flush a full buffer */
1581                         if (num_add == filter_list_len) {
1582                                 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1583                                                              add_list, num_add,
1584                                                              NULL);
1585                                 num_add = 0;
1586
1587                                 if (aq_ret)
1588                                         break;
1589                                 memset(add_list, 0, sizeof(*add_list));
1590                         }
1591                 }
1592                 if (num_add) {
1593                         aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1594                                                      add_list, num_add, NULL);
1595                         num_add = 0;
1596                 }
1597                 kfree(add_list);
1598                 add_list = NULL;
1599
1600                 if (add_happened && (!aq_ret)) {
1601                         /* do nothing */;
1602                 } else if (add_happened && (aq_ret)) {
1603                         dev_info(&pf->pdev->dev,
1604                                  "add filter failed, err %d, aq_err %d\n",
1605                                  aq_ret, pf->hw.aq.asq_last_status);
1606                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1607                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1608                                       &vsi->state)) {
1609                                 promisc_forced_on = true;
1610                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1611                                         &vsi->state);
1612                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1613                         }
1614                 }
1615         }
1616
1617         /* check for changes in promiscuous modes */
1618         if (changed_flags & IFF_ALLMULTI) {
1619                 bool cur_multipromisc;
1620                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1621                 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1622                                                                vsi->seid,
1623                                                                cur_multipromisc,
1624                                                                NULL);
1625                 if (aq_ret)
1626                         dev_info(&pf->pdev->dev,
1627                                  "set multi promisc failed, err %d, aq_err %d\n",
1628                                  aq_ret, pf->hw.aq.asq_last_status);
1629         }
1630         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1631                 bool cur_promisc;
1632                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1633                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1634                                         &vsi->state));
1635                 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1636                                                              vsi->seid,
1637                                                              cur_promisc, NULL);
1638                 if (aq_ret)
1639                         dev_info(&pf->pdev->dev,
1640                                  "set uni promisc failed, err %d, aq_err %d\n",
1641                                  aq_ret, pf->hw.aq.asq_last_status);
1642                 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1643                                                    vsi->seid,
1644                                                    cur_promisc, NULL);
1645                 if (aq_ret)
1646                         dev_info(&pf->pdev->dev,
1647                                  "set brdcast promisc failed, err %d, aq_err %d\n",
1648                                  aq_ret, pf->hw.aq.asq_last_status);
1649         }
1650
1651         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1652         return 0;
1653 }
1654
1655 /**
1656  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1657  * @pf: board private structure
1658  **/
1659 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1660 {
1661         int v;
1662
1663         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1664                 return;
1665         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1666
1667         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
1668                 if (pf->vsi[v] &&
1669                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1670                         i40e_sync_vsi_filters(pf->vsi[v]);
1671         }
1672 }
1673
1674 /**
1675  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1676  * @netdev: network interface device structure
1677  * @new_mtu: new value for maximum frame size
1678  *
1679  * Returns 0 on success, negative on failure
1680  **/
1681 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1682 {
1683         struct i40e_netdev_priv *np = netdev_priv(netdev);
1684         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1685         struct i40e_vsi *vsi = np->vsi;
1686
1687         /* MTU < 68 is an error and causes problems on some kernels */
1688         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1689                 return -EINVAL;
1690
1691         netdev_info(netdev, "changing MTU from %d to %d\n",
1692                     netdev->mtu, new_mtu);
1693         netdev->mtu = new_mtu;
1694         if (netif_running(netdev))
1695                 i40e_vsi_reinit_locked(vsi);
1696
1697         return 0;
1698 }
1699
1700 /**
1701  * i40e_ioctl - Access the hwtstamp interface
1702  * @netdev: network interface device structure
1703  * @ifr: interface request data
1704  * @cmd: ioctl command
1705  **/
1706 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1707 {
1708         struct i40e_netdev_priv *np = netdev_priv(netdev);
1709         struct i40e_pf *pf = np->vsi->back;
1710
1711         switch (cmd) {
1712         case SIOCGHWTSTAMP:
1713                 return i40e_ptp_get_ts_config(pf, ifr);
1714         case SIOCSHWTSTAMP:
1715                 return i40e_ptp_set_ts_config(pf, ifr);
1716         default:
1717                 return -EOPNOTSUPP;
1718         }
1719 }
1720
1721 /**
1722  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1723  * @vsi: the vsi being adjusted
1724  **/
1725 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1726 {
1727         struct i40e_vsi_context ctxt;
1728         i40e_status ret;
1729
1730         if ((vsi->info.valid_sections &
1731              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1732             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1733                 return;  /* already enabled */
1734
1735         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1736         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1737                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1738
1739         ctxt.seid = vsi->seid;
1740         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1741         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1742         if (ret) {
1743                 dev_info(&vsi->back->pdev->dev,
1744                          "%s: update vsi failed, aq_err=%d\n",
1745                          __func__, vsi->back->hw.aq.asq_last_status);
1746         }
1747 }
1748
1749 /**
1750  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1751  * @vsi: the vsi being adjusted
1752  **/
1753 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1754 {
1755         struct i40e_vsi_context ctxt;
1756         i40e_status ret;
1757
1758         if ((vsi->info.valid_sections &
1759              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1760             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1761              I40E_AQ_VSI_PVLAN_EMOD_MASK))
1762                 return;  /* already disabled */
1763
1764         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1765         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1766                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1767
1768         ctxt.seid = vsi->seid;
1769         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1770         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1771         if (ret) {
1772                 dev_info(&vsi->back->pdev->dev,
1773                          "%s: update vsi failed, aq_err=%d\n",
1774                          __func__, vsi->back->hw.aq.asq_last_status);
1775         }
1776 }
1777
1778 /**
1779  * i40e_vlan_rx_register - Setup or shutdown vlan offload
1780  * @netdev: network interface to be adjusted
1781  * @features: netdev features to test if VLAN offload is enabled or not
1782  **/
1783 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1784 {
1785         struct i40e_netdev_priv *np = netdev_priv(netdev);
1786         struct i40e_vsi *vsi = np->vsi;
1787
1788         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1789                 i40e_vlan_stripping_enable(vsi);
1790         else
1791                 i40e_vlan_stripping_disable(vsi);
1792 }
1793
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Adds a MAC/VLAN filter for @vid on the netdev's own address and on every
 * MAC already in the VSI's filter list, then migrates any existing
 * I40E_VLAN_ANY (-1) filters to VLAN 0 so that tagged and untagged traffic
 * are matched precisely rather than promiscuously.  Ends by pushing the
 * updated list to HW unless a reset is pending.
 *
 * Returns 0 on success, -ENOMEM if a filter could not be allocated, or the
 * result of i40e_sync_vsi_filters().
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* filter for the netdev's own MAC on this vlan */
	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* replicate this vlan onto every MAC already in the list */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any taged and untagged)
	 */
	/* NOTE(review): unlike the list walk below, this netdev-address
	 * branch does not check !vsi->info.pvid before replacing the -1
	 * filter — confirm whether that asymmetry is intentional
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					     is_vf, is_netdev)) {
				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
				add_f = i40e_add_filter(vsi, f->macaddr,
							0, is_vf, is_netdev);
				if (!add_f) {
					dev_info(&vsi->back->pdev->dev,
						 "Could not add filter 0 for %pM\n",
						 f->macaddr);
					return -ENOMEM;
				}
			}
		}
	}

	/* defer the HW sync during reset/teardown; it happens on bring-up */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
1875
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Removes the @vid filter for every MAC on the VSI, and if no tagged
 * filters remain, converts the VLAN 0 filters back into I40E_VLAN_ANY (-1)
 * so the VSI again accepts any tag.  Ends by pushing the updated list to
 * HW unless a reset is pending.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	/* NOTE: the netdev's own MAC is counted by both branches below, so
	 * it contributes twice — harmless since filter_count is only ever
	 * tested against zero
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters left: restore the netdev MAC to "any vlan" */
	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* likewise restore every listed MAC to "any vlan" */
	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					    is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				return -ENOMEM;
			}
		}
	}

	/* defer the HW sync during reset/teardown; it happens on bring-up */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi);
}
1947
1948 /**
1949  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1950  * @netdev: network interface to be adjusted
1951  * @vid: vlan id to be added
1952  *
1953  * net_device_ops implementation for adding vlan ids
1954  **/
1955 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1956                                 __always_unused __be16 proto, u16 vid)
1957 {
1958         struct i40e_netdev_priv *np = netdev_priv(netdev);
1959         struct i40e_vsi *vsi = np->vsi;
1960         int ret = 0;
1961
1962         if (vid > 4095)
1963                 return -EINVAL;
1964
1965         netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1966
1967         /* If the network stack called us with vid = 0, we should
1968          * indicate to i40e_vsi_add_vlan() that we want to receive
1969          * any traffic (i.e. with any vlan tag, or untagged)
1970          */
1971         ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1972
1973         if (!ret && (vid < VLAN_N_VID))
1974                 set_bit(vid, vsi->active_vlans);
1975
1976         return ret;
1977 }
1978
1979 /**
1980  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1981  * @netdev: network interface to be adjusted
1982  * @vid: vlan id to be removed
1983  *
1984  * net_device_ops implementation for adding vlan ids
1985  **/
1986 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1987                                  __always_unused __be16 proto, u16 vid)
1988 {
1989         struct i40e_netdev_priv *np = netdev_priv(netdev);
1990         struct i40e_vsi *vsi = np->vsi;
1991
1992         netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1993
1994         /* return code is ignored as there is nothing a user
1995          * can do about failure to remove and a log message was
1996          * already printed from the other function
1997          */
1998         i40e_vsi_kill_vlan(vsi, vid);
1999
2000         clear_bit(vid, vsi->active_vlans);
2001
2002         return 0;
2003 }
2004
2005 /**
2006  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2007  * @vsi: the vsi being brought back up
2008  **/
2009 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2010 {
2011         u16 vid;
2012
2013         if (!vsi->netdev)
2014                 return;
2015
2016         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2017
2018         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2019                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2020                                      vid);
2021 }
2022
2023 /**
2024  * i40e_vsi_add_pvid - Add pvid for the VSI
2025  * @vsi: the vsi being adjusted
2026  * @vid: the vlan id to set as a PVID
2027  **/
2028 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2029 {
2030         struct i40e_vsi_context ctxt;
2031         i40e_status aq_ret;
2032
2033         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2034         vsi->info.pvid = cpu_to_le16(vid);
2035         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2036                                     I40E_AQ_VSI_PVLAN_INSERT_PVID |
2037                                     I40E_AQ_VSI_PVLAN_EMOD_STR;
2038
2039         ctxt.seid = vsi->seid;
2040         memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2041         aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2042         if (aq_ret) {
2043                 dev_info(&vsi->back->pdev->dev,
2044                          "%s: update vsi failed, aq_err=%d\n",
2045                          __func__, vsi->back->hw.aq.asq_last_status);
2046                 return -ENOENT;
2047         }
2048
2049         return 0;
2050 }
2051
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 *
 * Turns off stripping (which also pushes the updated port_vlan_flags to
 * HW via the AdminQ) and clears the cached pvid so later context updates
 * no longer carry it.
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	/* clear after the AQ update so the disable call sees current state */
	vsi->info.pvid = 0;
}
2064
2065 /**
2066  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2067  * @vsi: ptr to the VSI
2068  *
2069  * If this function returns with an error, then it's possible one or
2070  * more of the rings is populated (while the rest are not).  It is the
2071  * callers duty to clean those orphaned rings.
2072  *
2073  * Return 0 on success, negative on failure
2074  **/
2075 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2076 {
2077         int i, err = 0;
2078
2079         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2080                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2081
2082         return err;
2083 }
2084
2085 /**
2086  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2087  * @vsi: ptr to the VSI
2088  *
2089  * Free VSI's transmit software resources
2090  **/
2091 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2092 {
2093         int i;
2094
2095         if (!vsi->tx_rings)
2096                 return;
2097
2098         for (i = 0; i < vsi->num_queue_pairs; i++)
2099                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2100                         i40e_free_tx_resources(vsi->tx_rings[i]);
2101 }
2102
2103 /**
2104  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2105  * @vsi: ptr to the VSI
2106  *
2107  * If this function returns with an error, then it's possible one or
2108  * more of the rings is populated (while the rest are not).  It is the
2109  * callers duty to clean those orphaned rings.
2110  *
2111  * Return 0 on success, negative on failure
2112  **/
2113 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2114 {
2115         int i, err = 0;
2116
2117         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2118                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2119         return err;
2120 }
2121
2122 /**
2123  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2124  * @vsi: ptr to the VSI
2125  *
2126  * Free all receive software resources
2127  **/
2128 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2129 {
2130         int i;
2131
2132         if (!vsi->rx_rings)
2133                 return;
2134
2135         for (i = 0; i < vsi->num_queue_pairs; i++)
2136                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2137                         i40e_free_rx_resources(vsi->rx_rings[i]);
2138 }
2139
2140 /**
2141  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2142  * @ring: The Tx ring to configure
2143  *
2144  * Configure the Tx descriptor ring in the HMC context.
2145  **/
2146 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2147 {
2148         struct i40e_vsi *vsi = ring->vsi;
2149         u16 pf_q = vsi->base_queue + ring->queue_index;
2150         struct i40e_hw *hw = &vsi->back->hw;
2151         struct i40e_hmc_obj_txq tx_ctx;
2152         i40e_status err = 0;
2153         u32 qtx_ctl = 0;
2154
2155         /* some ATR related tx ring init */
2156         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2157                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2158                 ring->atr_count = 0;
2159         } else {
2160                 ring->atr_sample_rate = 0;
2161         }
2162
2163         /* initialize XPS */
2164         if (ring->q_vector && ring->netdev &&
2165             vsi->tc_config.numtc <= 1 &&
2166             !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2167                 netif_set_xps_queue(ring->netdev,
2168                                     &ring->q_vector->affinity_mask,
2169                                     ring->queue_index);
2170
2171         /* clear the context structure first */
2172         memset(&tx_ctx, 0, sizeof(tx_ctx));
2173
2174         tx_ctx.new_context = 1;
2175         tx_ctx.base = (ring->dma / 128);
2176         tx_ctx.qlen = ring->count;
2177         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2178                                                I40E_FLAG_FD_ATR_ENABLED));
2179         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2180
2181         /* As part of VSI creation/update, FW allocates certain
2182          * Tx arbitration queue sets for each TC enabled for
2183          * the VSI. The FW returns the handles to these queue
2184          * sets as part of the response buffer to Add VSI,
2185          * Update VSI, etc. AQ commands. It is expected that
2186          * these queue set handles be associated with the Tx
2187          * queues by the driver as part of the TX queue context
2188          * initialization. This has to be done regardless of
2189          * DCB as by default everything is mapped to TC0.
2190          */
2191         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2192         tx_ctx.rdylist_act = 0;
2193
2194         /* clear the context in the HMC */
2195         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2196         if (err) {
2197                 dev_info(&vsi->back->pdev->dev,
2198                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2199                          ring->queue_index, pf_q, err);
2200                 return -ENOMEM;
2201         }
2202
2203         /* set the context in the HMC */
2204         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2205         if (err) {
2206                 dev_info(&vsi->back->pdev->dev,
2207                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2208                          ring->queue_index, pf_q, err);
2209                 return -ENOMEM;
2210         }
2211
2212         /* Now associate this queue with this PCI function */
2213         if (vsi->type == I40E_VSI_VMDQ2)
2214                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2215         else
2216                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2217         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2218                     I40E_QTX_CTL_PF_INDX_MASK);
2219         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2220         i40e_flush(hw);
2221
2222         clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2223
2224         /* cache tail off for easier writes later */
2225         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2226
2227         return 0;
2228 }
2229
2230 /**
2231  * i40e_configure_rx_ring - Configure a receive ring context
2232  * @ring: The Rx ring to configure
2233  *
2234  * Configure the Rx descriptor ring in the HMC context.
2235  **/
2236 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2237 {
2238         struct i40e_vsi *vsi = ring->vsi;
2239         u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2240         u16 pf_q = vsi->base_queue + ring->queue_index;
2241         struct i40e_hw *hw = &vsi->back->hw;
2242         struct i40e_hmc_obj_rxq rx_ctx;
2243         i40e_status err = 0;
2244
2245         ring->state = 0;
2246
2247         /* clear the context structure first */
2248         memset(&rx_ctx, 0, sizeof(rx_ctx));
2249
2250         ring->rx_buf_len = vsi->rx_buf_len;
2251         ring->rx_hdr_len = vsi->rx_hdr_len;
2252
2253         rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2254         rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2255
2256         rx_ctx.base = (ring->dma / 128);
2257         rx_ctx.qlen = ring->count;
2258
2259         if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2260                 set_ring_16byte_desc_enabled(ring);
2261                 rx_ctx.dsize = 0;
2262         } else {
2263                 rx_ctx.dsize = 1;
2264         }
2265
2266         rx_ctx.dtype = vsi->dtype;
2267         if (vsi->dtype) {
2268                 set_ring_ps_enabled(ring);
2269                 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
2270                                   I40E_RX_SPLIT_IP      |
2271                                   I40E_RX_SPLIT_TCP_UDP |
2272                                   I40E_RX_SPLIT_SCTP;
2273         } else {
2274                 rx_ctx.hsplit_0 = 0;
2275         }
2276
2277         rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2278                                   (chain_len * ring->rx_buf_len));
2279         rx_ctx.tphrdesc_ena = 1;
2280         rx_ctx.tphwdesc_ena = 1;
2281         rx_ctx.tphdata_ena = 1;
2282         rx_ctx.tphhead_ena = 1;
2283         if (hw->revision_id == 0)
2284                 rx_ctx.lrxqthresh = 0;
2285         else
2286                 rx_ctx.lrxqthresh = 2;
2287         rx_ctx.crcstrip = 1;
2288         rx_ctx.l2tsel = 1;
2289         rx_ctx.showiv = 1;
2290
2291         /* clear the context in the HMC */
2292         err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2293         if (err) {
2294                 dev_info(&vsi->back->pdev->dev,
2295                          "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2296                          ring->queue_index, pf_q, err);
2297                 return -ENOMEM;
2298         }
2299
2300         /* set the context in the HMC */
2301         err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2302         if (err) {
2303                 dev_info(&vsi->back->pdev->dev,
2304                          "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2305                          ring->queue_index, pf_q, err);
2306                 return -ENOMEM;
2307         }
2308
2309         /* cache tail for quicker writes, and clear the reg before use */
2310         ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2311         writel(0, ring->tail);
2312
2313         i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2314
2315         return 0;
2316 }
2317
2318 /**
2319  * i40e_vsi_configure_tx - Configure the VSI for Tx
2320  * @vsi: VSI structure describing this set of rings and resources
2321  *
2322  * Configure the Tx VSI for operation.
2323  **/
2324 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2325 {
2326         int err = 0;
2327         u16 i;
2328
2329         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2330                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2331
2332         return err;
2333 }
2334
2335 /**
2336  * i40e_vsi_configure_rx - Configure the VSI for Rx
2337  * @vsi: the VSI being configured
2338  *
2339  * Configure the Rx VSI for operation.
2340  **/
2341 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2342 {
2343         int err = 0;
2344         u16 i;
2345
2346         if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2347                 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2348                                + ETH_FCS_LEN + VLAN_HLEN;
2349         else
2350                 vsi->max_frame = I40E_RXBUFFER_2048;
2351
2352         /* figure out correct receive buffer length */
2353         switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2354                                     I40E_FLAG_RX_PS_ENABLED)) {
2355         case I40E_FLAG_RX_1BUF_ENABLED:
2356                 vsi->rx_hdr_len = 0;
2357                 vsi->rx_buf_len = vsi->max_frame;
2358                 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2359                 break;
2360         case I40E_FLAG_RX_PS_ENABLED:
2361                 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2362                 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2363                 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2364                 break;
2365         default:
2366                 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2367                 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2368                 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2369                 break;
2370         }
2371
2372         /* round up for the chip's needs */
2373         vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2374                                 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2375         vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2376                                 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2377
2378         /* set up individual rings */
2379         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2380                 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2381
2382         return err;
2383 }
2384
2385 /**
2386  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2387  * @vsi: ptr to the VSI
2388  **/
2389 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2390 {
2391         u16 qoffset, qcount;
2392         int i, n;
2393
2394         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2395                 return;
2396
2397         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2398                 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2399                         continue;
2400
2401                 qoffset = vsi->tc_config.tc_info[n].qoffset;
2402                 qcount = vsi->tc_config.tc_info[n].qcount;
2403                 for (i = qoffset; i < (qoffset + qcount); i++) {
2404                         struct i40e_ring *rx_ring = vsi->rx_rings[i];
2405                         struct i40e_ring *tx_ring = vsi->tx_rings[i];
2406                         rx_ring->dcb_tc = n;
2407                         tx_ring->dcb_tc = n;
2408                 }
2409         }
2410 }
2411
2412 /**
2413  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2414  * @vsi: ptr to the VSI
2415  **/
2416 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2417 {
2418         if (vsi->netdev)
2419                 i40e_set_rx_mode(vsi->netdev);
2420 }
2421
2422 /**
2423  * i40e_vsi_configure - Set up the VSI for action
2424  * @vsi: the VSI being configured
2425  **/
2426 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2427 {
2428         int err;
2429
2430         i40e_set_vsi_rx_mode(vsi);
2431         i40e_restore_vlan(vsi);
2432         i40e_vsi_config_dcb_rings(vsi);
2433         err = i40e_vsi_configure_tx(vsi);
2434         if (!err)
2435                 err = i40e_vsi_configure_rx(vsi);
2436
2437         return err;
2438 }
2439
2440 /**
2441  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2442  * @vsi: the VSI being configured
2443  **/
2444 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2445 {
2446         struct i40e_pf *pf = vsi->back;
2447         struct i40e_q_vector *q_vector;
2448         struct i40e_hw *hw = &pf->hw;
2449         u16 vector;
2450         int i, q;
2451         u32 val;
2452         u32 qp;
2453
2454         /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2455          * and PFINT_LNKLSTn registers, e.g.:
2456          *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
2457          */
2458         qp = vsi->base_queue;
2459         vector = vsi->base_vector;
2460         for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2461                 q_vector = vsi->q_vectors[i];
2462                 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2463                 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2464                 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2465                      q_vector->rx.itr);
2466                 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2467                 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2468                 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2469                      q_vector->tx.itr);
2470
2471                 /* Linked list for the queuepairs assigned to this vector */
2472                 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2473                 for (q = 0; q < q_vector->num_ringpairs; q++) {
2474                         val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2475                               (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
2476                               (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2477                               (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2478                               (I40E_QUEUE_TYPE_TX
2479                                       << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2480
2481                         wr32(hw, I40E_QINT_RQCTL(qp), val);
2482
2483                         val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2484                               (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
2485                               (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2486                               ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2487                               (I40E_QUEUE_TYPE_RX
2488                                       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2489
2490                         /* Terminate the linked list */
2491                         if (q == (q_vector->num_ringpairs - 1))
2492                                 val |= (I40E_QUEUE_END_OF_LIST
2493                                            << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2494
2495                         wr32(hw, I40E_QINT_TQCTL(qp), val);
2496                         qp++;
2497                 }
2498         }
2499
2500         i40e_flush(hw);
2501 }
2502
2503 /**
2504  * i40e_enable_misc_int_causes - enable the non-queue interrupts
2505  * @hw: ptr to the hardware info
2506  **/
2507 static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2508 {
2509         u32 val;
2510
2511         /* clear things first */
2512         wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2513         rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2514
2515         val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
2516               I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
2517               I40E_PFINT_ICR0_ENA_GRST_MASK          |
2518               I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2519               I40E_PFINT_ICR0_ENA_GPIO_MASK          |
2520               I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |
2521               I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK  |
2522               I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
2523               I40E_PFINT_ICR0_ENA_VFLR_MASK          |
2524               I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2525
2526         wr32(hw, I40E_PFINT_ICR0_ENA, val);
2527
2528         /* SW_ITR_IDX = 0, but don't change INTENA */
2529         wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2530                                         I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2531
2532         /* OTHER_ITR_IDX = 0 */
2533         wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2534 }
2535
2536 /**
2537  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2538  * @vsi: the VSI being configured
2539  **/
2540 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2541 {
2542         struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2543         struct i40e_pf *pf = vsi->back;
2544         struct i40e_hw *hw = &pf->hw;
2545         u32 val;
2546
2547         /* set the ITR configuration */
2548         q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2549         q_vector->rx.latency_range = I40E_LOW_LATENCY;
2550         wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2551         q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2552         q_vector->tx.latency_range = I40E_LOW_LATENCY;
2553         wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2554
2555         i40e_enable_misc_int_causes(hw);
2556
2557         /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2558         wr32(hw, I40E_PFINT_LNKLST0, 0);
2559
2560         /* Associate the queue pair to the vector and enable the q int */
2561         val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
2562               (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2563               (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2564
2565         wr32(hw, I40E_QINT_RQCTL(0), val);
2566
2567         val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
2568               (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2569               (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2570
2571         wr32(hw, I40E_QINT_TQCTL(0), val);
2572         i40e_flush(hw);
2573 }
2574
2575 /**
2576  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2577  * @pf: board private structure
2578  **/
2579 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2580 {
2581         struct i40e_hw *hw = &pf->hw;
2582
2583         wr32(hw, I40E_PFINT_DYN_CTL0,
2584              I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2585         i40e_flush(hw);
2586 }
2587
2588 /**
2589  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2590  * @pf: board private structure
2591  **/
2592 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2593 {
2594         struct i40e_hw *hw = &pf->hw;
2595         u32 val;
2596
2597         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
2598               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2599               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2600
2601         wr32(hw, I40E_PFINT_DYN_CTL0, val);
2602         i40e_flush(hw);
2603 }
2604
2605 /**
2606  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2607  * @vsi: pointer to a vsi
2608  * @vector: enable a particular Hw Interrupt vector
2609  **/
2610 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2611 {
2612         struct i40e_pf *pf = vsi->back;
2613         struct i40e_hw *hw = &pf->hw;
2614         u32 val;
2615
2616         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2617               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2618               (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2619         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2620         /* skip the flush */
2621 }
2622
2623 /**
2624  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2625  * @irq: interrupt number
2626  * @data: pointer to a q_vector
2627  **/
2628 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2629 {
2630         struct i40e_q_vector *q_vector = data;
2631
2632         if (!q_vector->tx.ring && !q_vector->rx.ring)
2633                 return IRQ_HANDLED;
2634
2635         napi_schedule(&q_vector->napi);
2636
2637         return IRQ_HANDLED;
2638 }
2639
2640 /**
2641  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2642  * @vsi: the VSI being configured
2643  * @basename: name for the vector
2644  *
2645  * Allocates MSI-X vectors and requests interrupts from the kernel.
2646  **/
2647 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2648 {
2649         int q_vectors = vsi->num_q_vectors;
2650         struct i40e_pf *pf = vsi->back;
2651         int base = vsi->base_vector;
2652         int rx_int_idx = 0;
2653         int tx_int_idx = 0;
2654         int vector, err;
2655
2656         for (vector = 0; vector < q_vectors; vector++) {
2657                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2658
2659                 if (q_vector->tx.ring && q_vector->rx.ring) {
2660                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2661                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2662                         tx_int_idx++;
2663                 } else if (q_vector->rx.ring) {
2664                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2665                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
2666                 } else if (q_vector->tx.ring) {
2667                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2668                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
2669                 } else {
2670                         /* skip this unused q_vector */
2671                         continue;
2672                 }
2673                 err = request_irq(pf->msix_entries[base + vector].vector,
2674                                   vsi->irq_handler,
2675                                   0,
2676                                   q_vector->name,
2677                                   q_vector);
2678                 if (err) {
2679                         dev_info(&pf->pdev->dev,
2680                                  "%s: request_irq failed, error: %d\n",
2681                                  __func__, err);
2682                         goto free_queue_irqs;
2683                 }
2684                 /* assign the mask for this irq */
2685                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2686                                       &q_vector->affinity_mask);
2687         }
2688
2689         return 0;
2690
2691 free_queue_irqs:
2692         while (vector) {
2693                 vector--;
2694                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2695                                       NULL);
2696                 free_irq(pf->msix_entries[base + vector].vector,
2697                          &(vsi->q_vectors[vector]));
2698         }
2699         return err;
2700 }
2701
2702 /**
2703  * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2704  * @vsi: the VSI being un-configured
2705  **/
2706 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2707 {
2708         struct i40e_pf *pf = vsi->back;
2709         struct i40e_hw *hw = &pf->hw;
2710         int base = vsi->base_vector;
2711         int i;
2712
2713         for (i = 0; i < vsi->num_queue_pairs; i++) {
2714                 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2715                 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2716         }
2717
2718         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2719                 for (i = vsi->base_vector;
2720                      i < (vsi->num_q_vectors + vsi->base_vector); i++)
2721                         wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2722
2723                 i40e_flush(hw);
2724                 for (i = 0; i < vsi->num_q_vectors; i++)
2725                         synchronize_irq(pf->msix_entries[i + base].vector);
2726         } else {
2727                 /* Legacy and MSI mode - this stops all interrupt handling */
2728                 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2729                 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2730                 i40e_flush(hw);
2731                 synchronize_irq(pf->pdev->irq);
2732         }
2733 }
2734
2735 /**
2736  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2737  * @vsi: the VSI being configured
2738  **/
2739 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2740 {
2741         struct i40e_pf *pf = vsi->back;
2742         int i;
2743
2744         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2745                 for (i = vsi->base_vector;
2746                      i < (vsi->num_q_vectors + vsi->base_vector); i++)
2747                         i40e_irq_dynamic_enable(vsi, i);
2748         } else {
2749                 i40e_irq_dynamic_enable_icr0(pf);
2750         }
2751
2752         i40e_flush(&pf->hw);
2753         return 0;
2754 }
2755
2756 /**
2757  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2758  * @pf: board private structure
2759  **/
2760 static void i40e_stop_misc_vector(struct i40e_pf *pf)
2761 {
2762         /* Disable ICR 0 */
2763         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2764         i40e_flush(&pf->hw);
2765 }
2766
2767 /**
2768  * i40e_intr - MSI/Legacy and non-queue interrupt handler
2769  * @irq: interrupt number
2770  * @data: pointer to a q_vector
2771  *
2772  * This is the handler used for all MSI/Legacy interrupts, and deals
2773  * with both queue and non-queue interrupts.  This is also used in
2774  * MSIX mode to handle the non-queue interrupts.
2775  **/
2776 static irqreturn_t i40e_intr(int irq, void *data)
2777 {
2778         struct i40e_pf *pf = (struct i40e_pf *)data;
2779         struct i40e_hw *hw = &pf->hw;
2780         irqreturn_t ret = IRQ_NONE;
2781         u32 icr0, icr0_remaining;
2782         u32 val, ena_mask;
2783
2784         icr0 = rd32(hw, I40E_PFINT_ICR0);
2785         ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2786
2787         /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2788         if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2789                 goto enable_intr;
2790
2791         /* if interrupt but no bits showing, must be SWINT */
2792         if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
2793             (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
2794                 pf->sw_int_count++;
2795
2796         /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2797         if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2798
2799                 /* temporarily disable queue cause for NAPI processing */
2800                 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2801                 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2802                 wr32(hw, I40E_QINT_RQCTL(0), qval);
2803
2804                 qval = rd32(hw, I40E_QINT_TQCTL(0));
2805                 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2806                 wr32(hw, I40E_QINT_TQCTL(0), qval);
2807
2808                 if (!test_bit(__I40E_DOWN, &pf->state))
2809                         napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2810         }
2811
2812         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2813                 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2814                 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2815         }
2816
2817         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2818                 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2819                 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2820         }
2821
2822         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2823                 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2824                 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2825         }
2826
2827         if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2828                 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2829                         set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2830                 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2831                 val = rd32(hw, I40E_GLGEN_RSTAT);
2832                 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2833                        >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2834                 if (val == I40E_RESET_CORER)
2835                         pf->corer_count++;
2836                 else if (val == I40E_RESET_GLOBR)
2837                         pf->globr_count++;
2838                 else if (val == I40E_RESET_EMPR)
2839                         pf->empr_count++;
2840         }
2841
2842         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2843                 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
2844                 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2845         }
2846
2847         if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
2848                 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
2849
2850                 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
2851                         ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2852                         i40e_ptp_tx_hwtstamp(pf);
2853                         prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
2854                 }
2855
2856                 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
2857         }
2858
2859         /* If a critical error is pending we have no choice but to reset the
2860          * device.
2861          * Report and mask out any remaining unexpected interrupts.
2862          */
2863         icr0_remaining = icr0 & ena_mask;
2864         if (icr0_remaining) {
2865                 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2866                          icr0_remaining);
2867                 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2868                     (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2869                     (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2870                     (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2871                         dev_info(&pf->pdev->dev, "device will be reset\n");
2872                         set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2873                         i40e_service_event_schedule(pf);
2874                 }
2875                 ena_mask &= ~icr0_remaining;
2876         }
2877         ret = IRQ_HANDLED;
2878
2879 enable_intr:
2880         /* re-enable interrupt causes */
2881         wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2882         if (!test_bit(__I40E_DOWN, &pf->state)) {
2883                 i40e_service_event_schedule(pf);
2884                 i40e_irq_dynamic_enable_icr0(pf);
2885         }
2886
2887         return ret;
2888 }
2889
2890 /**
2891  * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
2892  * @tx_ring:  tx ring to clean
2893  * @budget:   how many cleans we're allowed
2894  *
2895  * Returns true if there's any budget left (e.g. the clean is finished)
2896  **/
2897 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
2898 {
2899         struct i40e_vsi *vsi = tx_ring->vsi;
2900         u16 i = tx_ring->next_to_clean;
2901         struct i40e_tx_buffer *tx_buf;
2902         struct i40e_tx_desc *tx_desc;
2903
2904         tx_buf = &tx_ring->tx_bi[i];
2905         tx_desc = I40E_TX_DESC(tx_ring, i);
2906         i -= tx_ring->count;
2907
2908         do {
2909                 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
2910
2911                 /* if next_to_watch is not set then there is no work pending */
2912                 if (!eop_desc)
2913                         break;
2914
2915                 /* prevent any other reads prior to eop_desc */
2916                 read_barrier_depends();
2917
2918                 /* if the descriptor isn't done, no work yet to do */
2919                 if (!(eop_desc->cmd_type_offset_bsz &
2920                       cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
2921                         break;
2922
2923                 /* clear next_to_watch to prevent false hangs */
2924                 tx_buf->next_to_watch = NULL;
2925
2926                 /* unmap skb header data */
2927                 dma_unmap_single(tx_ring->dev,
2928                                  dma_unmap_addr(tx_buf, dma),
2929                                  dma_unmap_len(tx_buf, len),
2930                                  DMA_TO_DEVICE);
2931
2932                 dma_unmap_len_set(tx_buf, len, 0);
2933
2934
2935                 /* move to the next desc and buffer to clean */
2936                 tx_buf++;
2937                 tx_desc++;
2938                 i++;
2939                 if (unlikely(!i)) {
2940                         i -= tx_ring->count;
2941                         tx_buf = tx_ring->tx_bi;
2942                         tx_desc = I40E_TX_DESC(tx_ring, 0);
2943                 }
2944
2945                 /* update budget accounting */
2946                 budget--;
2947         } while (likely(budget));
2948
2949         i += tx_ring->count;
2950         tx_ring->next_to_clean = i;
2951
2952         if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
2953                 i40e_irq_dynamic_enable(vsi,
2954                                 tx_ring->q_vector->v_idx + vsi->base_vector);
2955         }
2956         return budget > 0;
2957 }
2958
2959 /**
2960  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
2961  * @irq: interrupt number
2962  * @data: pointer to a q_vector
2963  **/
2964 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
2965 {
2966         struct i40e_q_vector *q_vector = data;
2967         struct i40e_vsi *vsi;
2968
2969         if (!q_vector->tx.ring)
2970                 return IRQ_HANDLED;
2971
2972         vsi = q_vector->tx.ring->vsi;
2973         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
2974
2975         return IRQ_HANDLED;
2976 }
2977
2978 /**
2979  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
2980  * @vsi: the VSI being configured
2981  * @v_idx: vector index
2982  * @qp_idx: queue pair index
2983  **/
2984 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
2985 {
2986         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
2987         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
2988         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
2989
2990         tx_ring->q_vector = q_vector;
2991         tx_ring->next = q_vector->tx.ring;
2992         q_vector->tx.ring = tx_ring;
2993         q_vector->tx.count++;
2994
2995         rx_ring->q_vector = q_vector;
2996         rx_ring->next = q_vector->rx.ring;
2997         q_vector->rx.ring = rx_ring;
2998         q_vector->rx.count++;
2999 }
3000
3001 /**
3002  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3003  * @vsi: the VSI being configured
3004  *
3005  * This function maps descriptor rings to the queue-specific vectors
3006  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3007  * one vector per queue pair, but on a constrained vector budget, we
3008  * group the queue pairs as "efficiently" as possible.
3009  **/
3010 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3011 {
3012         int qp_remaining = vsi->num_queue_pairs;
3013         int q_vectors = vsi->num_q_vectors;
3014         int num_ringpairs;
3015         int v_start = 0;
3016         int qp_idx = 0;
3017
3018         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3019          * group them so there are multiple queues per vector.
3020          */
3021         for (; v_start < q_vectors && qp_remaining; v_start++) {
3022                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3023
3024                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3025
3026                 q_vector->num_ringpairs = num_ringpairs;
3027
3028                 q_vector->rx.count = 0;
3029                 q_vector->tx.count = 0;
3030                 q_vector->rx.ring = NULL;
3031                 q_vector->tx.ring = NULL;
3032
3033                 while (num_ringpairs--) {
3034                         map_vector_to_qp(vsi, v_start, qp_idx);
3035                         qp_idx++;
3036                         qp_remaining--;
3037                 }
3038         }
3039 }
3040
3041 /**
3042  * i40e_vsi_request_irq - Request IRQ from the OS
3043  * @vsi: the VSI being configured
3044  * @basename: name for the vector
3045  **/
3046 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3047 {
3048         struct i40e_pf *pf = vsi->back;
3049         int err;
3050
3051         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3052                 err = i40e_vsi_request_irq_msix(vsi, basename);
3053         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3054                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3055                                   pf->misc_int_name, pf);
3056         else
3057                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3058                                   pf->misc_int_name, pf);
3059
3060         if (err)
3061                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3062
3063         return err;
3064 }
3065
3066 #ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	pf->flags |= I40E_FLAG_IN_NETPOLL;
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* service every queue vector directly; the irq argument (0)
		 * is unused by the handler in this path
		 */
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		/* MSI/legacy: one shared handler services the device */
		i40e_intr(pf->pdev->irq, netdev);
	}
	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
}
3094 #endif
3095
/**
 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
 * @vsi: the VSI being configured
 * @enable: start (true) or stop (false) the rings
 *
 * Returns 0 on success, -ETIMEDOUT if a queue does not reach the
 * requested state within the polling window.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait for any previous request to settle: poll until the
		 * QENA_REQ and QENA_STAT bits agree (up to ~1-2 seconds)
		 */
		j = 1000;
		do {
			usleep_range(1000, 2000);
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
			       ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);

		/* Skip if the queue is already in the requested state */
		if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;
		if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset the queue head before (re)enabling */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
				  I40E_QTX_ENA_QENA_STAT_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);

		/* wait for the change to finish (up to ~100us) */
		for (j = 0; j < 10; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (enable) {
				if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
					break;
			} else {
				if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
					break;
			}

			udelay(10);
		}
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	/* NOTE(review): presumably early (rev 0) silicon needs extra settle
	 * time after queue state changes -- confirm against the datasheet
	 */
	if (hw->revision_id == 0)
		mdelay(50);

	return 0;
}
3159
/**
 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @enable: start (true) or stop (false) the rings
 *
 * Returns 0 on success, -ETIMEDOUT if a queue does not reach the
 * requested state within the polling window.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait for any previous request to settle: poll until the
		 * QENA_REQ and QENA_STAT bits agree (up to ~1-2 seconds)
		 */
		j = 1000;
		do {
			usleep_range(1000, 2000);
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		} while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
			       ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);

		/* skip queues already in the requested state */
		if (enable) {
			/* is STAT set ? */
			if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
				continue;
		} else {
			/* is !STAT set ? */
			if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
				continue;
		}

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
				  I40E_QRX_ENA_QENA_STAT_MASK;
		else
			rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
				  I40E_QRX_ENA_QENA_STAT_MASK);
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish (up to ~100us) */
		for (j = 0; j < 10; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));

			if (enable) {
				if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
					break;
			} else {
				if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
					break;
			}

			udelay(10);
		}
		if (j >= 10) {
			dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
				 pf_q, (enable ? "en" : "dis"));
			return -ETIMEDOUT;
		}
	}

	return 0;
}
3223
3224 /**
3225  * i40e_vsi_control_rings - Start or stop a VSI's rings
3226  * @vsi: the VSI being configured
3227  * @enable: start or stop the rings
3228  **/
3229 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3230 {
3231         int ret = 0;
3232
3233         /* do rx first for enable and last for disable */
3234         if (request) {
3235                 ret = i40e_vsi_control_rx(vsi, request);
3236                 if (ret)
3237                         return ret;
3238                 ret = i40e_vsi_control_tx(vsi, request);
3239         } else {
3240                 /* Ignore return value, we need to shutdown whatever we can */
3241                 i40e_vsi_control_tx(vsi, request);
3242                 i40e_vsi_control_rx(vsi, request);
3243         }
3244
3245         return ret;
3246 }
3247
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the OS IRQs held for this VSI and unhooks the corresponding
 * interrupt cause linked lists in hardware by writing the end-of-list
 * value back into each queue's RQCTL/TQCTL next-queue field.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			/* vector - 1: LNKLSTN is indexed without the misc/
			 * other-causes vector 0
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* walk the list, detaching each queue pair */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* remember the Tx side's next_q before we
				 * overwrite it with EOL
				 */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* MSI/legacy: single IRQ, single linked list (LNKLST0) */
		free_irq(pf->pdev->irq, pf);

		/* NOTE(review): the LNKLSTN field masks are reused on the
		 * LNKLST0 register here -- presumably the field layout is
		 * identical; confirm against the register reference
		 */
		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
3358
3359 /**
3360  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3361  * @vsi: the VSI being configured
3362  * @v_idx: Index of vector to be freed
3363  *
3364  * This function frees the memory allocated to the q_vector.  In addition if
3365  * NAPI is enabled it will delete any references to the NAPI struct prior
3366  * to freeing the q_vector.
3367  **/
3368 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3369 {
3370         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3371         struct i40e_ring *ring;
3372
3373         if (!q_vector)
3374                 return;
3375
3376         /* disassociate q_vector from rings */
3377         i40e_for_each_ring(ring, q_vector->tx)
3378                 ring->q_vector = NULL;
3379
3380         i40e_for_each_ring(ring, q_vector->rx)
3381                 ring->q_vector = NULL;
3382
3383         /* only VSI w/ an associated netdev is set up w/ NAPI */
3384         if (vsi->netdev)
3385                 netif_napi_del(&q_vector->napi);
3386
3387         vsi->q_vectors[v_idx] = NULL;
3388
3389         kfree_rcu(q_vector, rcu);
3390 }
3391
3392 /**
3393  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3394  * @vsi: the VSI being un-configured
3395  *
3396  * This frees the memory allocated to the q_vectors and
3397  * deletes references to the NAPI struct.
3398  **/
3399 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3400 {
3401         int v_idx;
3402
3403         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3404                 i40e_free_q_vector(vsi, v_idx);
3405 }
3406
3407 /**
3408  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3409  * @pf: board private structure
3410  **/
3411 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3412 {
3413         /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3414         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3415                 pci_disable_msix(pf->pdev);
3416                 kfree(pf->msix_entries);
3417                 pf->msix_entries = NULL;
3418         } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3419                 pci_disable_msi(pf->pdev);
3420         }
3421         pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3422 }
3423
3424 /**
3425  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3426  * @pf: board private structure
3427  *
3428  * We go through and clear interrupt specific resources and reset the structure
3429  * to pre-load conditions
3430  **/
3431 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3432 {
3433         int i;
3434
3435         i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3436         for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3437                 if (pf->vsi[i])
3438                         i40e_vsi_free_q_vectors(pf->vsi[i]);
3439         i40e_reset_interrupt_capability(pf);
3440 }
3441
3442 /**
3443  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3444  * @vsi: the VSI being configured
3445  **/
3446 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3447 {
3448         int q_idx;
3449
3450         if (!vsi->netdev)
3451                 return;
3452
3453         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3454                 napi_enable(&vsi->q_vectors[q_idx]->napi);
3455 }
3456
3457 /**
3458  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3459  * @vsi: the VSI being configured
3460  **/
3461 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3462 {
3463         int q_idx;
3464
3465         if (!vsi->netdev)
3466                 return;
3467
3468         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3469                 napi_disable(&vsi->q_vectors[q_idx]->napi);
3470 }
3471
3472 /**
3473  * i40e_quiesce_vsi - Pause a given VSI
3474  * @vsi: the VSI being paused
3475  **/
3476 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3477 {
3478         if (test_bit(__I40E_DOWN, &vsi->state))
3479                 return;
3480
3481         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3482         if (vsi->netdev && netif_running(vsi->netdev)) {
3483                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3484         } else {
3485                 set_bit(__I40E_DOWN, &vsi->state);
3486                 i40e_down(vsi);
3487         }
3488 }
3489
3490 /**
3491  * i40e_unquiesce_vsi - Resume a given VSI
3492  * @vsi: the VSI being resumed
3493  **/
3494 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3495 {
3496         if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3497                 return;
3498
3499         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3500         if (vsi->netdev && netif_running(vsi->netdev))
3501                 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3502         else
3503                 i40e_up(vsi);   /* this clears the DOWN bit */
3504 }
3505
3506 /**
3507  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3508  * @pf: the PF
3509  **/
3510 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3511 {
3512         int v;
3513
3514         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3515                 if (pf->vsi[v])
3516                         i40e_quiesce_vsi(pf->vsi[v]);
3517         }
3518 }
3519
3520 /**
3521  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3522  * @pf: the PF
3523  **/
3524 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3525 {
3526         int v;
3527
3528         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3529                 if (pf->vsi[v])
3530                         i40e_unquiesce_vsi(pf->vsi[v]);
3531         }
3532 }
3533
3534 /**
3535  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
3536  * @dcbcfg: the corresponding DCBx configuration structure
3537  *
3538  * Return the number of TCs from given DCBx configuration
3539  **/
3540 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3541 {
3542         u8 num_tc = 0;
3543         int i;
3544
3545         /* Scan the ETS Config Priority Table to find
3546          * traffic class enabled for a given priority
3547          * and use the traffic class index to get the
3548          * number of traffic classes enabled
3549          */
3550         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3551                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3552                         num_tc = dcbcfg->etscfg.prioritytable[i];
3553         }
3554
3555         /* Traffic class index starts from zero so
3556          * increment to return the actual count
3557          */
3558         return num_tc + 1;
3559 }
3560
3561 /**
3562  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3563  * @dcbcfg: the corresponding DCBx configuration structure
3564  *
3565  * Query the current DCB configuration and return the number of
3566  * traffic classes enabled from the given DCBX config
3567  **/
3568 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3569 {
3570         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3571         u8 enabled_tc = 1;
3572         u8 i;
3573
3574         for (i = 0; i < num_tc; i++)
3575                 enabled_tc |= 1 << i;
3576
3577         return enabled_tc;
3578 }
3579
3580 /**
3581  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3582  * @pf: PF being queried
3583  *
3584  * Return number of traffic classes enabled for the given PF
3585  **/
3586 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3587 {
3588         struct i40e_hw *hw = &pf->hw;
3589         u8 i, enabled_tc;
3590         u8 num_tc = 0;
3591         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3592
3593         /* If DCB is not enabled then always in single TC */
3594         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3595                 return 1;
3596
3597         /* MFP mode return count of enabled TCs for this PF */
3598         if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3599                 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3600                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3601                         if (enabled_tc & (1 << i))
3602                                 num_tc++;
3603                 }
3604                 return num_tc;
3605         }
3606
3607         /* SFP mode will be enabled for all TCs on port */
3608         return i40e_dcb_get_num_tc(dcbcfg);
3609 }
3610
3611 /**
3612  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3613  * @pf: PF being queried
3614  *
3615  * Return a bitmap for first enabled traffic class for this PF.
3616  **/
3617 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3618 {
3619         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3620         u8 i = 0;
3621
3622         if (!enabled_tc)
3623                 return 0x1; /* TC0 */
3624
3625         /* Find the first enabled TC */
3626         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3627                 if (enabled_tc & (1 << i))
3628                         break;
3629         }
3630
3631         return 1 << i;
3632 }
3633
3634 /**
3635  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3636  * @pf: PF being queried
3637  *
3638  * Return a bitmap for enabled traffic classes for this PF.
3639  **/
3640 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3641 {
3642         /* If DCB is not enabled for this PF then just return default TC */
3643         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3644                 return i40e_pf_get_default_tc(pf);
3645
3646         /* MFP mode will have enabled TCs set by FW */
3647         if (pf->flags & I40E_FLAG_MFP_ENABLED)
3648                 return pf->hw.func_caps.enabled_tcmap;
3649
3650         /* SFP mode we want PF to be enabled for all TCs */
3651         return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3652 }
3653
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Queries the VSI-level and per-TC bandwidth configuration via the
 * admin queue and caches the results in the vsi struct.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
						  NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
			 aq_ret, pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	/* mismatch is logged but tolerated; the caches below still get
	 * updated from what firmware returned
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* reassemble the 32-bit max-quanta field from two LE16 words */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
3711
3712 /**
3713  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3714  * @vsi: the VSI being configured
3715  * @enabled_tc: TC bitmap
3716  * @bw_credits: BW shared credits per TC
3717  *
3718  * Returns 0 on success, negative value on failure
3719  **/
3720 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3721                                        u8 *bw_share)
3722 {
3723         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3724         i40e_status aq_ret;
3725         int i;
3726
3727         bw_data.tc_valid_bits = enabled_tc;
3728         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3729                 bw_data.tc_bw_credits[i] = bw_share[i];
3730
3731         aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3732                                           NULL);
3733         if (aq_ret) {
3734                 dev_info(&vsi->back->pdev->dev,
3735                          "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3736                          __func__, vsi->back->hw.aq.asq_last_status);
3737                 return -EINVAL;
3738         }
3739
3740         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3741                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3742
3743         return 0;
3744 }
3745
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 * Mirrors the VSI's traffic class configuration into the netdev so the
 * stack's queue selection matches the hardware queue mapping.
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	/* no TCs requested: clear any previous TC setup on the netdev */
	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & (1 << i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
3798
/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 *
 * Only the sections that "update vsi params" actually returns are
 * copied back; the rest of vsi->info is left untouched.
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping, &ctxt->info.queue_mapping,
	       sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
3817
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quisced before calling
 * this function.
 *
 * Returns 0 on success, negative value on failure.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vsi failed, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	/* valid_sections was consumed by the update; clear for next user */
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, aq_err=%d\n",
			 vsi->back->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
3890
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element.
 *
 * Returns 0 on success (or when there is nothing to change),
 * otherwise the error from the firmware admin queue call.
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & (1 << i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "veb bw config failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
		goto out;
	}

	/* Update the BW information cached in the VEB struct */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
	}

out:
	return ret;
}
3938
3939 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function.
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components;
			 * errors here are logged but not fatal.
			 */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Propagate the new config to dcbnl only for VSIs
			 * that have a registered netdev.
			 */
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
3993
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure.
 *
 * Returns 0 on success or when DCB init is skipped (NPAR mode),
 * otherwise the error from i40e_init_dcb().
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* DCB is not supported when NPAR is enabled on this function */
	if (pf->hw.func_caps.npar_enable)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW;
			 * advertise firmware-managed IEEE DCBX to dcbnl.
			 */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;
			pf->flags |= I40E_FLAG_DCB_ENABLED;
		}
	}

out:
	return err;
}
4032 #endif /* CONFIG_I40E_DCB */
4033
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Programs interrupts, starts the rings, enables NAPI/IRQs, and (if the
 * link is already up) wakes the netdev queues.
 *
 * Returns 0 on success, negative value if the rings fail to start.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	/* Only touch netdev queue/carrier state when a netdev exists */
	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		netdev_info(vsi->netdev, "NIC Link is Up\n");
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
	}
	/* Kick the service task to pick up any follow-on work */
	i40e_service_event_schedule(pf);

	return 0;
}
4069
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.  Serialized against concurrent
 * reconfiguration via the __I40E_CONFIG_BUSY bit.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	/* Must not be called from interrupt context: sleeps below */
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset.  The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
4095
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Reprograms the VSI (rings, filters, interrupts) and then completes
 * bring-up; returns 0 on success or a negative error from either step.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
4110
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 *
 * Stops the netdev queues, disables interrupts and rings, and drains
 * the Tx/Rx rings.
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	/* Free any buffers still sitting in the rings */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}
4135
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Validates that DCB is enabled, the device is not in MFP mode, and the
 * requested TC count does not exceed what the link allows, then quiesces
 * the VSI and applies the new TC map.
 *
 * Returns 0 on success (or if the config is already in effect),
 * -EINVAL when the request cannot be honored, or the error from
 * i40e_vsi_config_tc().
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= (1 << i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
4193
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * On failure the partially-acquired resources are unwound in reverse
 * order and, for the main LAN VSI, a PF reset is requested.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	char int_name[IFNAMSIZ];
	int err;

	/* disallow open during test */
	if (test_bit(__I40E_TESTING, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	/* NOTE(review): snprintf already NUL-terminates within the given
	 * size, so the "- 1" here only shortens the usable buffer; it is
	 * harmless but unnecessary.
	 */
	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), netdev->name);
	err = i40e_vsi_request_irq(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
	if (err)
		goto err_set_queues;

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

#ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
#endif

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	/* The main LAN VSI failing to open warrants a full PF reset */
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));

	return err;
}
4270
/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
static int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Already down: test_and_set makes close idempotent */
	if (test_and_set_bit(__I40E_DOWN, &vsi->state))
		return 0;

	i40e_down(vsi);
	i40e_vsi_free_irq(vsi);

	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);

	return 0;
}
4297
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the single largest reset indicated by @reset_flags is performed;
 * the if/else chain below is ordered from widest to narrowest scope.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	/* Sleeps/locking below are not interrupt-safe */
	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_info(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_info(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {

		/* Request a Firmware Reset
		 *
		 * Same as Global reset, plus restarting the
		 * embedded firmware engine.
		 */
		/* enable EMP Reset */
		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);

		/* force the reset */
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_info(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];
			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}

		/* no further action needed, so return now */
		return;
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
		return;
	}
}
4395
4396 #ifdef CONFIG_I40E_DCB
4397 /**
4398  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
4399  * @pf: board private structure
4400  * @old_cfg: current DCB config
4401  * @new_cfg: new DCB config
4402  **/
4403 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4404                             struct i40e_dcbx_config *old_cfg,
4405                             struct i40e_dcbx_config *new_cfg)
4406 {
4407         bool need_reconfig = false;
4408
4409         /* Check if ETS configuration has changed */
4410         if (memcmp(&new_cfg->etscfg,
4411                    &old_cfg->etscfg,
4412                    sizeof(new_cfg->etscfg))) {
4413                 /* If Priority Table has changed reconfig is needed */
4414                 if (memcmp(&new_cfg->etscfg.prioritytable,
4415                            &old_cfg->etscfg.prioritytable,
4416                            sizeof(new_cfg->etscfg.prioritytable))) {
4417                         need_reconfig = true;
4418                         dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
4419                 }
4420
4421                 if (memcmp(&new_cfg->etscfg.tcbwtable,
4422                            &old_cfg->etscfg.tcbwtable,
4423                            sizeof(new_cfg->etscfg.tcbwtable)))
4424                         dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4425
4426                 if (memcmp(&new_cfg->etscfg.tsatable,
4427                            &old_cfg->etscfg.tsatable,
4428                            sizeof(new_cfg->etscfg.tsatable)))
4429                         dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
4430         }
4431
4432         /* Check if PFC configuration has changed */
4433         if (memcmp(&new_cfg->pfc,
4434                    &old_cfg->pfc,
4435                    sizeof(new_cfg->pfc))) {
4436                 need_reconfig = true;
4437                 dev_info(&pf->pdev->dev, "PFC config change detected.\n");
4438         }
4439
4440         /* Check if APP Table has changed */
4441         if (memcmp(&new_cfg->app,
4442                    &old_cfg->app,
4443                    sizeof(new_cfg->app)))
4444                 need_reconfig = true;
4445                 dev_info(&pf->pdev->dev, "APP Table change detected.\n");
4446
4447         return need_reconfig;
4448 }
4449
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Parses an LLDP MIB-change event from firmware, updates the cached
 * local/remote DCBX configuration, and reconfigures the VEBs/VSIs if
 * the new configuration requires it.
 *
 * Returns 0, or the error from DCB config retrieval/parsing.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Convert/store the DCBX data from LLDPDU temporarily */
	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
	ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
	if (ret) {
		/* Error in LLDPDU parsing return */
		dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
		goto exit;
	}

	/* No change detected in DCBX configs.
	 * NOTE(review): whole-struct memcmp relies on both configs being
	 * fully zeroed (including padding) before population.
	 */
	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
		dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);

	/* Overwrite the new configuration */
	*dcbx_cfg = tmp_dcbx_cfg;

	if (!need_reconfig)
		goto exit;

	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	i40e_pf_unquiesce_all_vsi(pf);
exit:
	return ret;
}
4518 #endif /* CONFIG_I40E_DCB */
4519
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() with the rtnl lock so it can be safely
 * invoked from paths that are not already holding it.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
4532
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues.  If the overflowing queue belongs to a VF, that VF is
 * notified and then reset.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
		 __func__, queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		/* VFVM index is absolute; rebase to this PF's first VF */
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
4568
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the SERVICE_SCHED bit so the service task can be
 * scheduled again; it is a bug to call this when the bit is not set.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
4581
/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 *
 * Clears the FDIR_REQUIRES_REINIT flag if set.  NOTE(review): the
 * trailing __I40E_DOWN check currently guards nothing — the function
 * returns immediately afterwards either way; presumably the actual
 * reinit work is yet to be added after it.
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
		return;

	pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;
}
4597
4598 /**
4599  * i40e_vsi_link_event - notify VSI of a link event
4600  * @vsi: vsi to be notified
4601  * @link_up: link up or down
4602  **/
4603 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4604 {
4605         if (!vsi)
4606                 return;
4607
4608         switch (vsi->type) {
4609         case I40E_VSI_MAIN:
4610                 if (!vsi->netdev || !vsi->netdev_registered)
4611                         break;
4612
4613                 if (link_up) {
4614                         netif_carrier_on(vsi->netdev);
4615                         netif_tx_wake_all_queues(vsi->netdev);
4616                 } else {
4617                         netif_carrier_off(vsi->netdev);
4618                         netif_tx_stop_all_queues(vsi->netdev);
4619                 }
4620                 break;
4621
4622         case I40E_VSI_SRIOV:
4623                 break;
4624
4625         case I40E_VSI_VMDQ2:
4626         case I40E_VSI_CTRL:
4627         case I40E_VSI_MIRROR:
4628         default:
4629                 /* there is no notification for other VSIs */
4630                 break;
4631         }
4632 }
4633
4634 /**
4635  * i40e_veb_link_event - notify elements on the veb of a link event
4636  * @veb: veb to be notified
4637  * @link_up: link up or down
4638  **/
4639 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4640 {
4641         struct i40e_pf *pf;
4642         int i;
4643
4644         if (!veb || !veb->pf)
4645                 return;
4646         pf = veb->pf;
4647
4648         /* depth first... */
4649         for (i = 0; i < I40E_MAX_VEB; i++)
4650                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4651                         i40e_veb_link_event(pf->veb[i], link_up);
4652
4653         /* ... now the local VSIs */
4654         for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4655                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4656                         i40e_vsi_link_event(pf->vsi[i], link_up);
4657 }
4658
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Compares the current and previous link state; on a change, logs it,
 * notifies the switch tree (or the LAN VSI directly when there is no
 * LAN VEB), informs any VFs, and refreshes the PTP clock increment.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	bool new_link, old_link;

	new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	/* Nothing to do when the link state is unchanged */
	if (new_link == old_link)
		return;

	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
		netdev_info(pf->vsi[pf->lan_vsi]->netdev,
			    "NIC Link is %s\n", (new_link ? "Up" : "Down"));

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	/* Link speed changes affect the PTP clock increment value */
	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
4691
/**
 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 * @pf: board private structure
 *
 * Set the per-queue flags to request a check for stuck queues in the irq
 * clean functions, then force interrupts to be sure the irq clean is called.
 **/
static void i40e_check_hang_subtask(struct i40e_pf *pf)
{
	int i, v;

	/* If we're down or resetting, just bail */
	if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* for each VSI/netdev
	 *     for each Tx queue
	 *         set the check flag
	 *     for each q_vector
	 *         force an interrupt
	 */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];
		int armed = 0;

		/* Skip absent, down, or carrier-off VSIs */
		if (!pf->vsi[v] ||
		    test_bit(__I40E_DOWN, &vsi->state) ||
		    (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			set_check_for_tx_hang(vsi->tx_rings[i]);
			if (test_bit(__I40E_HANG_CHECK_ARMED,
				     &vsi->tx_rings[i]->state))
				armed++;
		}

		/* Fire a software interrupt so the clean routine runs and
		 * evaluates the armed hang checks.
		 */
		if (armed) {
			if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
				wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
				     (I40E_PFINT_DYN_CTL0_INTENA_MASK |
				      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
			} else {
				u16 vec = vsi->base_vector - 1;
				u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
					   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
				for (i = 0; i < vsi->num_q_vectors; i++, vec++)
					wr32(&vsi->back->hw,
					     I40E_PFINT_DYN_CTLN(vec), val);
			}
			i40e_flush(&vsi->back->hw);
		}
	}
}
4746
4747 /**
4748  * i40e_watchdog_subtask - Check and bring link up
4749  * @pf: board private structure
4750  **/
4751 static void i40e_watchdog_subtask(struct i40e_pf *pf)
4752 {
4753         int i;
4754
4755         /* if interface is down do nothing */
4756         if (test_bit(__I40E_DOWN, &pf->state) ||
4757             test_bit(__I40E_CONFIG_BUSY, &pf->state))
4758                 return;
4759
4760         /* Update the stats for active netdevs so the network stack
4761          * can look at updated numbers whenever it cares to
4762          */
4763         for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4764                 if (pf->vsi[i] && pf->vsi[i]->netdev)
4765                         i40e_update_stats(pf->vsi[i]);
4766
4767         /* Update the stats for the active switching components */
4768         for (i = 0; i < I40E_MAX_VEB; i++)
4769                 if (pf->veb[i])
4770                         i40e_update_veb_stats(pf->veb[i]);
4771
4772         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
4773 }
4774
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Collects all reset requests latched in pf->state into a single flags
 * word (clearing each request bit), then either finishes a pending
 * HW-signalled reset recovery or performs the requested reset.  All of
 * this runs under rtnl.
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}
4818
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Copies the link status fields out of the ARQ descriptor into the hw
 * struct (saving the old values first), runs the common link event
 * processing, then re-requests link info to re-arm LSE reporting and
 * catch any change that raced with the first event.
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_hw *hw = &pf->hw;
	/* the get_link_status payload lives in the descriptor's raw params */
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;

	/* save off old link status information */
	memcpy(&pf->hw.phy.link_info_old, hw_link_info,
	       sizeof(pf->hw.phy.link_info_old));

	/* update link status */
	hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
	hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
	hw_link_info->link_info = status->link_info;
	hw_link_info->an_info = status->an_info;
	hw_link_info->ext_info = status->ext_info;
	hw_link_info->lse_enable =
		le16_to_cpu(status->command_flags) &
			    I40E_AQ_LSE_ENABLE;

	/* process the event */
	i40e_link_event(pf);

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct,
	 * then see if the status changed while processing the
	 * initial event.
	 */
	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
	i40e_link_event(pf);
}
4857
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 *
 * Drains pending Admin Receive Queue events (bounded by
 * pf->adminq_work_limit), dispatching each by opcode, then re-enables
 * the AdminQ interrupt cause so further events raise an interrupt again.
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 val;

	/* only run when the interrupt handler flagged pending work */
	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
		return;

	event.msg_size = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			dev_info(&pf->pdev->dev, "No ARQ event found\n");
			break;
		} else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			/* message from a VF: cookie carries msg id/param */
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_size);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			/* DCB reconfig may touch netdevs, so take rtnl */
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ Error: Unknown event 0x%04x received\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);
	i40e_flush(hw);

	kfree(event.msg_buf);
}
4936
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 *
 * Returns 0 on success, -ENOENT if the owner VSI is missing, or the
 * error from the failing i40e_add_vsi()/i40e_add_veb() call.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	/* a non-Main owner VSI borrows the Main VSI's uplink until the
	 * VEB exists to hang it from (i40e_add_veb below)
	 */
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of owner VSI failed: %d\n", ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];
			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
5015
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries function capabilities over the AQ; the results are written
 * into pf->hw.func_caps as a side effect of
 * i40e_aq_discover_capabilities().  If firmware reports ENOMEM the
 * query is retried with the buffer size firmware asked for.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV if
 * the AQ command fails for any other reason.
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed: aq=%d\n",
				 pf->hw.aq.asq_last_status);
			return -ENODEV;
		}
	} while (err);

	/* increment MSI-X count because current FW skips one */
	pf->hw.func_caps.num_msix_vectors++;

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

	/* On revision 0 hardware, make sure num_vsis covers at least the
	 * LAN VSI, the optional FCoE VSI, and one VSI per VF.
	 */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
5077
5078 static int i40e_vsi_clear(struct i40e_vsi *vsi);
5079
5080 /**
5081  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
5082  * @pf: board private structure
5083  **/
5084 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5085 {
5086         struct i40e_vsi *vsi;
5087         bool new_vsi = false;
5088         int err, i;
5089
5090         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
5091                 return;
5092
5093         /* find existing VSI and see if it needs configuring */
5094         vsi = NULL;
5095         for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5096                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5097                         vsi = pf->vsi[i];
5098                         break;
5099                 }
5100         }
5101
5102         /* create a new VSI if none exists */
5103         if (!vsi) {
5104                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
5105                                      pf->vsi[pf->lan_vsi]->seid, 0);
5106                 if (!vsi) {
5107                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
5108                         goto err_vsi;
5109                 }
5110                 new_vsi = true;
5111         }
5112         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
5113
5114         err = i40e_vsi_setup_tx_resources(vsi);
5115         if (err)
5116                 goto err_setup_tx;
5117         err = i40e_vsi_setup_rx_resources(vsi);
5118         if (err)
5119                 goto err_setup_rx;
5120
5121         if (new_vsi) {
5122                 char int_name[IFNAMSIZ + 9];
5123                 err = i40e_vsi_configure(vsi);
5124                 if (err)
5125                         goto err_setup_rx;
5126                 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
5127                          dev_driver_string(&pf->pdev->dev));
5128                 err = i40e_vsi_request_irq(vsi, int_name);
5129                 if (err)
5130                         goto err_setup_rx;
5131                 err = i40e_up_complete(vsi);
5132                 if (err)
5133                         goto err_up_complete;
5134         }
5135
5136         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
5137         return;
5138
5139 err_up_complete:
5140         i40e_down(vsi);
5141         i40e_vsi_free_irq(vsi);
5142 err_setup_rx:
5143         i40e_vsi_free_rx_resources(vsi);
5144 err_setup_tx:
5145         i40e_vsi_free_tx_resources(vsi);
5146 err_vsi:
5147         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
5148         i40e_vsi_clear(vsi);
5149 }
5150
5151 /**
5152  * i40e_fdir_teardown - release the Flow Director resources
5153  * @pf: board private structure
5154  **/
5155 static void i40e_fdir_teardown(struct i40e_pf *pf)
5156 {
5157         int i;
5158
5159         for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5160                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5161                         i40e_vsi_release(pf->vsi[i]);
5162                         break;
5163                 }
5164         }
5165 }
5166
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for pf Reset.
 *
 * Returns 0 on success (or when a recovery is already pending),
 * otherwise the status from shutting down the LAN HMC.
 **/
static int i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	/* only one reset recovery may be in flight at a time */
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return 0;

	dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* warn the VFs, but only if the AQ is still usable */
	if (i40e_check_asq_alive(hw))
		i40e_vc_notify_reset(pf);

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	/* zero the stored seids; they can change across the reset and are
	 * re-assigned when the switch is rebuilt
	 */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	ret = i40e_shutdown_lan_hmc(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
		/* recovery won't proceed, so drop the pending flag */
		clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
	}
	return ret;
}
5206
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Performs the PF reset, then rebuilds the AdminQ, capabilities, HMC,
 * DCB config (when enabled), the internal switch, and the VSI/VEB tree
 * that existed before the reset, finally restarting the quiesced VSIs.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_driver_version dv;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret)
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
	pf->pfr_count++;

	/* don't rebuild if the driver is going down anyway */
	if (test_bit(__I40E_DOWN, &pf->state))
		goto end_core_reset;
	dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
		goto end_core_reset;
	}

	/* re-read the device capabilities into pf->hw.func_caps */
	ret = i40e_get_capabilities(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
			 ret);
		goto end_core_reset;
	}

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
		goto end_core_reset;
	}
#endif /* CONFIG_I40E_DCB */

	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* tell the firmware that we're starting */
	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

	dev_info(&pf->pdev->dev, "PF reset done\n");

end_core_reset:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
5345
5346 /**
5347  * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
5348  * @pf: board private structure
5349  *
5350  * Close up the VFs and other things in prep for a Core Reset,
5351  * then get ready to rebuild the world.
5352  **/
5353 static void i40e_handle_reset_warning(struct i40e_pf *pf)
5354 {
5355         i40e_status ret;
5356
5357         ret = i40e_prep_for_reset(pf);
5358         if (!ret)
5359                 i40e_reset_and_rebuild(pf, false);
5360 }
5361
5362 /**
5363  * i40e_handle_mdd_event
5364  * @pf: pointer to the pf structure
5365  *
5366  * Called from the MDD irq handler to identify possibly malicious vfs
5367  **/
5368 static void i40e_handle_mdd_event(struct i40e_pf *pf)
5369 {
5370         struct i40e_hw *hw = &pf->hw;
5371         bool mdd_detected = false;
5372         struct i40e_vf *vf;
5373         u32 reg;
5374         int i;
5375
5376         if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
5377                 return;
5378
5379         /* find what triggered the MDD event */
5380         reg = rd32(hw, I40E_GL_MDET_TX);
5381         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
5382                 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
5383                                 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
5384                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
5385                                 >> I40E_GL_MDET_TX_EVENT_SHIFT;
5386                 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
5387                                 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
5388                 dev_info(&pf->pdev->dev,
5389                          "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
5390                          event, queue, func);
5391                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5392                 mdd_detected = true;
5393         }
5394         reg = rd32(hw, I40E_GL_MDET_RX);
5395         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
5396                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
5397                                 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
5398                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
5399                                 >> I40E_GL_MDET_RX_EVENT_SHIFT;
5400                 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
5401                                 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
5402                 dev_info(&pf->pdev->dev,
5403                          "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
5404                          event, queue, func);
5405                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5406                 mdd_detected = true;
5407         }
5408
5409         /* see if one of the VFs needs its hand slapped */
5410         for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
5411                 vf = &(pf->vf[i]);
5412                 reg = rd32(hw, I40E_VP_MDET_TX(i));
5413                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
5414                         wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
5415                         vf->num_mdd_events++;
5416                         dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
5417                 }
5418
5419                 reg = rd32(hw, I40E_VP_MDET_RX(i));
5420                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
5421                         wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
5422                         vf->num_mdd_events++;
5423                         dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
5424                 }
5425
5426                 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
5427                         dev_info(&pf->pdev->dev,
5428                                  "Too many MDD events on VF %d, disabled\n", i);
5429                         dev_info(&pf->pdev->dev,
5430                                  "Use PF Control I/F to re-enable the VF\n");
5431                         set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
5432                 }
5433         }
5434
5435         /* re-enable mdd interrupt cause */
5436         clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
5437         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
5438         reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
5439         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
5440         i40e_flush(hw);
5441 }
5442
5443 #ifdef CONFIG_I40E_VXLAN
5444 /**
5445  * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
5446  * @pf: board private structure
5447  **/
5448 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5449 {
5450         const int vxlan_hdr_qwords = 4;
5451         struct i40e_hw *hw = &pf->hw;
5452         i40e_status ret;
5453         u8 filter_index;
5454         __be16 port;
5455         int i;
5456
5457         if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
5458                 return;
5459
5460         pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
5461
5462         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5463                 if (pf->pending_vxlan_bitmap & (1 << i)) {
5464                         pf->pending_vxlan_bitmap &= ~(1 << i);
5465                         port = pf->vxlan_ports[i];
5466                         ret = port ?
5467                               i40e_aq_add_udp_tunnel(hw, ntohs(port),
5468                                                      vxlan_hdr_qwords,
5469                                                      I40E_AQC_TUNNEL_TYPE_VXLAN,
5470                                                      &filter_index, NULL)
5471                               : i40e_aq_del_udp_tunnel(hw, i, NULL);
5472
5473                         if (ret) {
5474                                 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
5475                                          port ? "adding" : "deleting",
5476                                          ntohs(port), port ? i : i);
5477
5478                                 pf->vxlan_ports[i] = 0;
5479                         } else {
5480                                 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
5481                                          port ? "Added" : "Deleted",
5482                                          ntohs(port), port ? i : filter_index);
5483                         }
5484                 }
5485         }
5486 }
5487
5488 #endif
5489 /**
5490  * i40e_service_task - Run the driver's async subtasks
5491  * @work: pointer to work_struct containing our data
5492  **/
5493 static void i40e_service_task(struct work_struct *work)
5494 {
5495         struct i40e_pf *pf = container_of(work,
5496                                           struct i40e_pf,
5497                                           service_task);
5498         unsigned long start_time = jiffies;
5499
5500         i40e_reset_subtask(pf);
5501         i40e_handle_mdd_event(pf);
5502         i40e_vc_process_vflr_event(pf);
5503         i40e_watchdog_subtask(pf);
5504         i40e_fdir_reinit_subtask(pf);
5505         i40e_check_hang_subtask(pf);
5506         i40e_sync_filters_subtask(pf);
5507 #ifdef CONFIG_I40E_VXLAN
5508         i40e_sync_vxlan_filters_subtask(pf);
5509 #endif
5510         i40e_clean_adminq_subtask(pf);
5511
5512         i40e_service_event_complete(pf);
5513
5514         /* If the tasks have taken longer than one timer cycle or there
5515          * is more work to be done, reschedule the service task now
5516          * rather than wait for the timer to tick again.
5517          */
5518         if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5519             test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
5520             test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
5521             test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5522                 i40e_service_event_schedule(pf);
5523 }
5524
5525 /**
5526  * i40e_service_timer - timer callback
5527  * @data: pointer to PF struct
5528  **/
5529 static void i40e_service_timer(unsigned long data)
5530 {
5531         struct i40e_pf *pf = (struct i40e_pf *)data;
5532
5533         mod_timer(&pf->service_timer,
5534                   round_jiffies(jiffies + pf->service_timer_period));
5535         i40e_service_event_schedule(pf);
5536 }
5537
5538 /**
5539  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5540  * @vsi: the VSI being configured
5541  **/
5542 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5543 {
5544         struct i40e_pf *pf = vsi->back;
5545
5546         switch (vsi->type) {
5547         case I40E_VSI_MAIN:
5548                 vsi->alloc_queue_pairs = pf->num_lan_qps;
5549                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5550                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5551                 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5552                         vsi->num_q_vectors = pf->num_lan_msix;
5553                 else
5554                         vsi->num_q_vectors = 1;
5555
5556                 break;
5557
5558         case I40E_VSI_FDIR:
5559                 vsi->alloc_queue_pairs = 1;
5560                 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5561                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5562                 vsi->num_q_vectors = 1;
5563                 break;
5564
5565         case I40E_VSI_VMDQ2:
5566                 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5567                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5568                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5569                 vsi->num_q_vectors = pf->num_vmdq_msix;
5570                 break;
5571
5572         case I40E_VSI_SRIOV:
5573                 vsi->alloc_queue_pairs = pf->num_vf_qps;
5574                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5575                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
5576                 break;
5577
5578         default:
5579                 WARN_ON(1);
5580                 return -ENODATA;
5581         }
5582
5583         return 0;
5584 }
5585
5586 /**
5587  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
5588  * @type: VSI pointer
5589  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
5590  *
5591  * On error: returns error code (negative)
5592  * On success: returns 0
5593  **/
5594 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
5595 {
5596         int size;
5597         int ret = 0;
5598
5599         /* allocate memory for both Tx and Rx ring pointers */
5600         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5601         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5602         if (!vsi->tx_rings)
5603                 return -ENOMEM;
5604         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5605
5606         if (alloc_qvectors) {
5607                 /* allocate memory for q_vector pointers */
5608                 size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
5609                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5610                 if (!vsi->q_vectors) {
5611                         ret = -ENOMEM;
5612                         goto err_vectors;
5613                 }
5614         }
5615         return ret;
5616
5617 err_vectors:
5618         kfree(vsi->tx_rings);
5619         return ret;
5620 }
5621
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
		i++;
	if (i >= pf->hw.func_caps.num_vsis) {
		/* wrapped past the end; rescan from the start up to where
		 * the forward scan began
		 */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* start the next search just past the slot we claimed */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* basic per-VSI defaults; the VSI comes up marked down */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* give the claimed slot back to the search hint */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
5701
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI whose arrays are to be freed
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	/* rx_rings points into the tx_rings allocation, so it is released
	 * by the kfree above and must only be cleared here
	 */
	vsi->rx_rings = NULL;
}
5721
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Always returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI with no back-pointer was never hooked into a PF, so there
	 * is nothing to unwind beyond the struct itself
	 */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* sanity check that the PF slot really holds this VSI */
	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the pf for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	/* let the allocator reuse this slot first next time */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
5771
5772 /**
5773  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5774  * @vsi: the VSI being cleaned
5775  **/
5776 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5777 {
5778         int i;
5779
5780         if (vsi->tx_rings && vsi->tx_rings[0]) {
5781                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5782                         kfree_rcu(vsi->tx_rings[i], rcu);
5783                         vsi->tx_rings[i] = NULL;
5784                         vsi->rx_rings[i] = NULL;
5785                 }
5786         }
5787 }
5788
5789 /**
5790  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5791  * @vsi: the VSI being configured
5792  **/
5793 static int i40e_alloc_rings(struct i40e_vsi *vsi)
5794 {
5795         struct i40e_pf *pf = vsi->back;
5796         int i;
5797
5798         /* Set basic values in the rings to be used later during open() */
5799         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5800                 struct i40e_ring *tx_ring;
5801                 struct i40e_ring *rx_ring;
5802
5803                 /* allocate space for both Tx and Rx in one shot */
5804                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5805                 if (!tx_ring)
5806                         goto err_out;
5807
5808                 tx_ring->queue_index = i;
5809                 tx_ring->reg_idx = vsi->base_queue + i;
5810                 tx_ring->ring_active = false;
5811                 tx_ring->vsi = vsi;
5812                 tx_ring->netdev = vsi->netdev;
5813                 tx_ring->dev = &pf->pdev->dev;
5814                 tx_ring->count = vsi->num_desc;
5815                 tx_ring->size = 0;
5816                 tx_ring->dcb_tc = 0;
5817                 vsi->tx_rings[i] = tx_ring;
5818
5819                 rx_ring = &tx_ring[1];
5820                 rx_ring->queue_index = i;
5821                 rx_ring->reg_idx = vsi->base_queue + i;
5822                 rx_ring->ring_active = false;
5823                 rx_ring->vsi = vsi;
5824                 rx_ring->netdev = vsi->netdev;
5825                 rx_ring->dev = &pf->pdev->dev;
5826                 rx_ring->count = vsi->num_desc;
5827                 rx_ring->size = 0;
5828                 rx_ring->dcb_tc = 0;
5829                 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5830                         set_ring_16byte_desc_enabled(rx_ring);
5831                 else
5832                         clear_ring_16byte_desc_enabled(rx_ring);
5833                 vsi->rx_rings[i] = rx_ring;
5834         }
5835
5836         return 0;
5837
5838 err_out:
5839         i40e_vsi_clear_rings(vsi);
5840         return -ENOMEM;
5841 }
5842
5843 /**
5844  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
5845  * @pf: board private structure
5846  * @vectors: the number of MSI-X vectors to request
5847  *
5848  * Returns the number of vectors reserved, or error
5849  **/
5850 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5851 {
5852         int err = 0;
5853
5854         pf->num_msix_entries = 0;
5855         while (vectors >= I40E_MIN_MSIX) {
5856                 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5857                 if (err == 0) {
5858                         /* good to go */
5859                         pf->num_msix_entries = vectors;
5860                         break;
5861                 } else if (err < 0) {
5862                         /* total failure */
5863                         dev_info(&pf->pdev->dev,
5864                                  "MSI-X vector reservation failed: %d\n", err);
5865                         vectors = 0;
5866                         break;
5867                 } else {
5868                         /* err > 0 is the hint for retry */
5869                         dev_info(&pf->pdev->dev,
5870                                  "MSI-X vectors wanted %d, retrying with %d\n",
5871                                  vectors, err);
5872                         vectors = err;
5873                 }
5874         }
5875
5876         if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5877                 dev_info(&pf->pdev->dev,
5878                          "Couldn't get enough vectors, only %d available\n",
5879                          vectors);
5880                 vectors = 0;
5881         }
5882
5883         return vectors;
5884 }
5885
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	i40e_status err = 0;
	struct i40e_hw *hw = &pf->hw;
	int v_budget, i;
	int vec;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *      - Queues being used for RSS.
	 *              We don't need as many as max_rss_size vectors.
	 *              use rss_size instead in the calculation since that
	 *              is governed by number of cpus in the system.
	 *      - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
	pf->num_vmdq_msix = pf->num_vmdq_qps;
	v_budget = 1 + pf->num_lan_msix;
	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
	/* one extra vector for the flow director sideband queue */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
		v_budget++;

	/* Scale down if necessary, and the rings will share vectors */
	v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	vec = i40e_reserve_msix_vectors(pf, v_budget);
	if (vec < I40E_MIN_MSIX) {
		/* couldn't even get the minimum; fall back to non-MSIX */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (vec == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_vmdq_msix = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (vec != v_budget) {
		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		vec--;                    /* reserve the misc vector */

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 1;
			break;
		case 3:
			pf->num_vmdq_vsis = 1;
			pf->num_lan_msix = 2;
			break;
		default:
			/* split roughly in half between LAN and VMDq */
			pf->num_lan_msix = min_t(int, (vec / 2),
						 pf->num_lan_qps);
			pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			break;
		}
	}

	/* err is never modified after init, so success paths return 0 */
	return err;
}
5978
5979 /**
5980  * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
5981  * @vsi: the VSI being configured
5982  * @v_idx: index of the vector in the vsi struct
5983  *
5984  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
5985  **/
5986 static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5987 {
5988         struct i40e_q_vector *q_vector;
5989
5990         /* allocate q_vector */
5991         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
5992         if (!q_vector)
5993                 return -ENOMEM;
5994
5995         q_vector->vsi = vsi;
5996         q_vector->v_idx = v_idx;
5997         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
5998         if (vsi->netdev)
5999                 netif_napi_add(vsi->netdev, &q_vector->napi,
6000                                i40e_napi_poll, vsi->work_limit);
6001
6002         q_vector->rx.latency_range = I40E_LOW_LATENCY;
6003         q_vector->tx.latency_range = I40E_LOW_LATENCY;
6004
6005         /* tie q_vector and vsi together */
6006         vsi->q_vectors[v_idx] = q_vector;
6007
6008         return 0;
6009 }
6010
6011 /**
6012  * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
6013  * @vsi: the VSI being configured
6014  *
6015  * We allocate one q_vector per queue interrupt.  If allocation fails we
6016  * return -ENOMEM.
6017  **/
6018 static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
6019 {
6020         struct i40e_pf *pf = vsi->back;
6021         int v_idx, num_q_vectors;
6022         int err;
6023
6024         /* if not MSIX, give the one vector only to the LAN VSI */
6025         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6026                 num_q_vectors = vsi->num_q_vectors;
6027         else if (vsi == pf->vsi[pf->lan_vsi])
6028                 num_q_vectors = 1;
6029         else
6030                 return -EINVAL;
6031
6032         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
6033                 err = i40e_alloc_q_vector(vsi, v_idx);
6034                 if (err)
6035                         goto err_out;
6036         }
6037
6038         return 0;
6039
6040 err_out:
6041         while (v_idx--)
6042                 i40e_free_q_vector(vsi, v_idx);
6043
6044         return err;
6045 }
6046
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first, falling back to MSI and then legacy interrupts,
 * clearing the feature flags that require MSI-X along the way.
 **/
static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int err = 0;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_init_msix(pf);
		if (err) {
			/* these features all require MSI-X vectors */
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
				       I40E_FLAG_RSS_ENABLED    |
				       I40E_FLAG_DCB_ENABLED    |
				       I40E_FLAG_SRIOV_ENABLED  |
				       I40E_FLAG_FD_SB_ENABLED  |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
		err = pci_enable_msi(pf->pdev);
		if (err) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");

	/* track first vector for misc interrupts */
	/* NOTE(review): the function returns void, so this err assignment
	 * is discarded — a failed reservation here is silently ignored;
	 * confirm whether callers can tolerate that
	 */
	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
6087
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Returns 0 on success, -EFAULT if the irq request fails.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->misc_int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for msix_misc failed: %d\n", err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(hw);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	/* flush the register writes before enabling the interrupt */
	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
6126
/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 *
 * Programs the RSS hash key, enables the default hash types, and fills
 * the lookup table with queue indices in round-robin order.
 * Always returns 0.
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	/* Set of random keys generated using kernel random number generator */
	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
	struct i40e_hw *hw = &pf->hw;
	u32 lut = 0;
	int i, j;
	u64 hena;

	/* Fill out hash function seed */
	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_PFQF_HKEY(i), seed[i]);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= I40E_DEFAULT_RSS_HENA;
	/* the 64-bit enable mask spans two 32-bit registers */
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {

		/* The assumption is that lan qp count will be the highest
		 * qp count for any PF VSI that needs RSS.
		 * If multiple VSIs need RSS support, all the qp counts
		 * for those VSIs should be a power of 2 for RSS to work.
		 * If LAN VSI is the only consumer for RSS then this requirement
		 * is not necessary.
		 */
		if (j == pf->rss_size)
			j = 0;
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (j &
			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3)
			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}
	i40e_flush(hw);

	return 0;
}
6177
6178 /**
6179  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
6180  * @pf: board private structure
6181  * @queue_count: the requested queue count for rss.
6182  *
6183  * returns 0 if rss is not enabled, if enabled returns the final rss queue
6184  * count which may be different from the requested queue count.
6185  **/
6186 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
6187 {
6188         if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
6189                 return 0;
6190
6191         queue_count = min_t(int, queue_count, pf->rss_size_max);
6192         queue_count = rounddown_pow_of_two(queue_count);
6193
6194         if (queue_count != pf->rss_size) {
6195                 i40e_prep_for_reset(pf);
6196
6197                 pf->rss_size = queue_count;
6198
6199                 i40e_reset_and_rebuild(pf, true);
6200                 i40e_config_rss(pf);
6201         }
6202         dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
6203         return pf->rss_size;
6204 }
6205
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* set up message levels; the module 'debug' parameter can override
	 * the defaults, with I40E_DEBUG_USER bits routed to hw.debug_mask
	 */
	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED    |
		    I40E_FLAG_RX_1BUF_ENABLED;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		/* rss_size is bounded by online CPUs and rounded down
		 * to a power of two
		 */
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
		pf->rss_size = rounddown_pow_of_two(pf->rss_size);
	} else {
		pf->rss_size = 1;
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		dev_info(&pf->pdev->dev,
			"Flow Director ATR mode Enabled\n");
		/* FD sideband is only enabled when MFP mode is off */
		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev,
				 "Flow Director Side Band mode Enabled\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Flow Director Side Band mode Disabled in MFP mode\n");
		}
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
		dev_info(&pf->pdev->dev,
			 "Number of VFs being requested for PF[%d] = %d\n",
			 pf->hw.pf_id, pf->num_req_vfs);
	}
#endif /* CONFIG_PCI_IOV */
	/* placeholder eeprom version; VEB/VSI not assigned yet */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		kfree(pf->qp_pile);
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
	pf->irq_pile->search_hint = 0;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}
6326
6327 /**
6328  * i40e_set_features - set the netdev feature flags
6329  * @netdev: ptr to the netdev being adjusted
6330  * @features: the feature set that the stack is suggesting
6331  **/
6332 static int i40e_set_features(struct net_device *netdev,
6333                              netdev_features_t features)
6334 {
6335         struct i40e_netdev_priv *np = netdev_priv(netdev);
6336         struct i40e_vsi *vsi = np->vsi;
6337
6338         if (features & NETIF_F_HW_VLAN_CTAG_RX)
6339                 i40e_vlan_stripping_enable(vsi);
6340         else
6341                 i40e_vlan_stripping_disable(vsi);
6342
6343         return 0;
6344 }
6345
6346 #ifdef CONFIG_I40E_VXLAN
6347 /**
6348  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
6349  * @pf: board private structure
6350  * @port: The UDP port to look up
6351  *
6352  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
6353  **/
6354 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
6355 {
6356         u8 i;
6357
6358         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6359                 if (pf->vxlan_ports[i] == port)
6360                         return i;
6361         }
6362
6363         return i;
6364 }
6365
6366 /**
6367  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
6368  * @netdev: This physical port's netdev
6369  * @sa_family: Socket Family that VXLAN is notifying us about
6370  * @port: New UDP port number that VXLAN started listening to
6371  **/
6372 static void i40e_add_vxlan_port(struct net_device *netdev,
6373                                 sa_family_t sa_family, __be16 port)
6374 {
6375         struct i40e_netdev_priv *np = netdev_priv(netdev);
6376         struct i40e_vsi *vsi = np->vsi;
6377         struct i40e_pf *pf = vsi->back;
6378         u8 next_idx;
6379         u8 idx;
6380
6381         if (sa_family == AF_INET6)
6382                 return;
6383
6384         idx = i40e_get_vxlan_port_idx(pf, port);
6385
6386         /* Check if port already exists */
6387         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6388                 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
6389                 return;
6390         }
6391
6392         /* Now check if there is space to add the new port */
6393         next_idx = i40e_get_vxlan_port_idx(pf, 0);
6394
6395         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6396                 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
6397                             ntohs(port));
6398                 return;
6399         }
6400
6401         /* New port: add it and mark its index in the bitmap */
6402         pf->vxlan_ports[next_idx] = port;
6403         pf->pending_vxlan_bitmap |= (1 << next_idx);
6404
6405         pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6406 }
6407
6408 /**
6409  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
6410  * @netdev: This physical port's netdev
6411  * @sa_family: Socket Family that VXLAN is notifying us about
6412  * @port: UDP port number that VXLAN stopped listening to
6413  **/
6414 static void i40e_del_vxlan_port(struct net_device *netdev,
6415                                 sa_family_t sa_family, __be16 port)
6416 {
6417         struct i40e_netdev_priv *np = netdev_priv(netdev);
6418         struct i40e_vsi *vsi = np->vsi;
6419         struct i40e_pf *pf = vsi->back;
6420         u8 idx;
6421
6422         if (sa_family == AF_INET6)
6423                 return;
6424
6425         idx = i40e_get_vxlan_port_idx(pf, port);
6426
6427         /* Check if port already exists */
6428         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6429                 /* if port exists, set it to 0 (mark for deletion)
6430                  * and make it pending
6431                  */
6432                 pf->vxlan_ports[idx] = 0;
6433
6434                 pf->pending_vxlan_bitmap |= (1 << idx);
6435
6436                 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6437         } else {
6438                 netdev_warn(netdev, "Port %d was not found, not deleting\n",
6439                             ntohs(port));
6440         }
6441 }
6442
6443 #endif
/* netdev callbacks implemented by the i40e LAN driver; the netpoll and
 * VXLAN offload entries are compiled in only when the corresponding
 * kernel config options are enabled
 */
static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
        .ndo_start_xmit         = i40e_lan_xmit_frame,
        .ndo_get_stats64        = i40e_get_netdev_stats_struct,
        .ndo_set_rx_mode        = i40e_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = i40e_set_mac,
        .ndo_change_mtu         = i40e_change_mtu,
        .ndo_do_ioctl           = i40e_ioctl,
        .ndo_tx_timeout         = i40e_tx_timeout,
        .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = i40e_netpoll,
#endif
        .ndo_setup_tc           = i40e_setup_tc,
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
        .ndo_set_vf_tx_rate     = i40e_ndo_set_vf_bw,
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
#ifdef CONFIG_I40E_VXLAN
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
#endif
};
6471
6472 /**
6473  * i40e_config_netdev - Setup the netdev flags
6474  * @vsi: the VSI being configured
6475  *
6476  * Returns 0 on success, negative value on failure
6477  **/
6478 static int i40e_config_netdev(struct i40e_vsi *vsi)
6479 {
6480         u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
6481         struct i40e_pf *pf = vsi->back;
6482         struct i40e_hw *hw = &pf->hw;
6483         struct i40e_netdev_priv *np;
6484         struct net_device *netdev;
6485         u8 mac_addr[ETH_ALEN];
6486         int etherdev_size;
6487
6488         etherdev_size = sizeof(struct i40e_netdev_priv);
6489         netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
6490         if (!netdev)
6491                 return -ENOMEM;
6492
6493         vsi->netdev = netdev;
6494         np = netdev_priv(netdev);
6495         np->vsi = vsi;
6496
6497         netdev->hw_enc_features = NETIF_F_IP_CSUM        |
6498                                   NETIF_F_GSO_UDP_TUNNEL |
6499                                   NETIF_F_TSO            |
6500                                   NETIF_F_SG;
6501
6502         netdev->features = NETIF_F_SG                  |
6503                            NETIF_F_IP_CSUM             |
6504                            NETIF_F_SCTP_CSUM           |
6505                            NETIF_F_HIGHDMA             |
6506                            NETIF_F_GSO_UDP_TUNNEL      |
6507                            NETIF_F_HW_VLAN_CTAG_TX     |
6508                            NETIF_F_HW_VLAN_CTAG_RX     |
6509                            NETIF_F_HW_VLAN_CTAG_FILTER |
6510                            NETIF_F_IPV6_CSUM           |
6511                            NETIF_F_TSO                 |
6512                            NETIF_F_TSO6                |
6513                            NETIF_F_RXCSUM              |
6514                            NETIF_F_RXHASH              |
6515                            0;
6516
6517         /* copy netdev features into list of user selectable features */
6518         netdev->hw_features |= netdev->features;
6519
6520         if (vsi->type == I40E_VSI_MAIN) {
6521                 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6522                 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
6523         } else {
6524                 /* relate the VSI_VMDQ name to the VSI_MAIN name */
6525                 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
6526                          pf->vsi[pf->lan_vsi]->netdev->name);
6527                 random_ether_addr(mac_addr);
6528                 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
6529         }
6530         i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
6531
6532         memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
6533         memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
6534         /* vlan gets same features (except vlan offload)
6535          * after any tweaks for specific VSI types
6536          */
6537         netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
6538                                                      NETIF_F_HW_VLAN_CTAG_RX |
6539                                                    NETIF_F_HW_VLAN_CTAG_FILTER);
6540         netdev->priv_flags |= IFF_UNICAST_FLT;
6541         netdev->priv_flags |= IFF_SUPP_NOFCS;
6542         /* Setup netdev TC information */
6543         i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
6544
6545         netdev->netdev_ops = &i40e_netdev_ops;
6546         netdev->watchdog_timeo = 5 * HZ;
6547         i40e_set_ethtool_ops(netdev);
6548
6549         return 0;
6550 }
6551
6552 /**
6553  * i40e_vsi_delete - Delete a VSI from the switch
6554  * @vsi: the VSI being removed
6555  *
6556  * Returns 0 on success, negative value on failure
6557  **/
6558 static void i40e_vsi_delete(struct i40e_vsi *vsi)
6559 {
6560         /* remove default VSI is not allowed */
6561         if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
6562                 return;
6563
6564         i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
6565         return;
6566 }
6567
6568 /**
6569  * i40e_add_vsi - Add a VSI to the switch
6570  * @vsi: the VSI being configured
6571  *
6572  * This initializes a VSI context depending on the VSI type to be added and
6573  * passes it down to the add_vsi aq command.
6574  **/
6575 static int i40e_add_vsi(struct i40e_vsi *vsi)
6576 {
6577         int ret = -ENODEV;
6578         struct i40e_mac_filter *f, *ftmp;
6579         struct i40e_pf *pf = vsi->back;
6580         struct i40e_hw *hw = &pf->hw;
6581         struct i40e_vsi_context ctxt;
6582         u8 enabled_tc = 0x1; /* TC0 enabled */
6583         int f_count = 0;
6584
6585         memset(&ctxt, 0, sizeof(ctxt));
6586         switch (vsi->type) {
6587         case I40E_VSI_MAIN:
6588                 /* The PF's main VSI is already setup as part of the
6589                  * device initialization, so we'll not bother with
6590                  * the add_vsi call, but we will retrieve the current
6591                  * VSI context.
6592                  */
6593                 ctxt.seid = pf->main_vsi_seid;
6594                 ctxt.pf_num = pf->hw.pf_id;
6595                 ctxt.vf_num = 0;
6596                 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6597                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6598                 if (ret) {
6599                         dev_info(&pf->pdev->dev,
6600                                  "couldn't get pf vsi config, err %d, aq_err %d\n",
6601                                  ret, pf->hw.aq.asq_last_status);
6602                         return -ENOENT;
6603                 }
6604                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6605                 vsi->info.valid_sections = 0;
6606
6607                 vsi->seid = ctxt.seid;
6608                 vsi->id = ctxt.vsi_number;
6609
6610                 enabled_tc = i40e_pf_get_tc_map(pf);
6611
6612                 /* MFP mode setup queue map and update VSI */
6613                 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6614                         memset(&ctxt, 0, sizeof(ctxt));
6615                         ctxt.seid = pf->main_vsi_seid;
6616                         ctxt.pf_num = pf->hw.pf_id;
6617                         ctxt.vf_num = 0;
6618                         i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
6619                         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6620                         if (ret) {
6621                                 dev_info(&pf->pdev->dev,
6622                                          "update vsi failed, aq_err=%d\n",
6623                                          pf->hw.aq.asq_last_status);
6624                                 ret = -ENOENT;
6625                                 goto err;
6626                         }
6627                         /* update the local VSI info queue map */
6628                         i40e_vsi_update_queue_map(vsi, &ctxt);
6629                         vsi->info.valid_sections = 0;
6630                 } else {
6631                         /* Default/Main VSI is only enabled for TC0
6632                          * reconfigure it to enable all TCs that are
6633                          * available on the port in SFP mode.
6634                          */
6635                         ret = i40e_vsi_config_tc(vsi, enabled_tc);
6636                         if (ret) {
6637                                 dev_info(&pf->pdev->dev,
6638                                          "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
6639                                          enabled_tc, ret,
6640                                          pf->hw.aq.asq_last_status);
6641                                 ret = -ENOENT;
6642                         }
6643                 }
6644                 break;
6645
6646         case I40E_VSI_FDIR:
6647                 ctxt.pf_num = hw->pf_id;
6648                 ctxt.vf_num = 0;
6649                 ctxt.uplink_seid = vsi->uplink_seid;
6650                 ctxt.connection_type = 0x1;     /* regular data port */
6651                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6652                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6653                 break;
6654
6655         case I40E_VSI_VMDQ2:
6656                 ctxt.pf_num = hw->pf_id;
6657                 ctxt.vf_num = 0;
6658                 ctxt.uplink_seid = vsi->uplink_seid;
6659                 ctxt.connection_type = 0x1;     /* regular data port */
6660                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6661
6662                 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6663
6664                 /* This VSI is connected to VEB so the switch_id
6665                  * should be set to zero by default.
6666                  */
6667                 ctxt.info.switch_id = 0;
6668                 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6669                 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6670
6671                 /* Setup the VSI tx/rx queue map for TC0 only for now */
6672                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6673                 break;
6674
6675         case I40E_VSI_SRIOV:
6676                 ctxt.pf_num = hw->pf_id;
6677                 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
6678                 ctxt.uplink_seid = vsi->uplink_seid;
6679                 ctxt.connection_type = 0x1;     /* regular data port */
6680                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6681
6682                 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6683
6684                 /* This VSI is connected to VEB so the switch_id
6685                  * should be set to zero by default.
6686                  */
6687                 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6688
6689                 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6690                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6691                 /* Setup the VSI tx/rx queue map for TC0 only for now */
6692                 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6693                 break;
6694
6695         default:
6696                 return -ENODEV;
6697         }
6698
6699         if (vsi->type != I40E_VSI_MAIN) {
6700                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6701                 if (ret) {
6702                         dev_info(&vsi->back->pdev->dev,
6703                                  "add vsi failed, aq_err=%d\n",
6704                                  vsi->back->hw.aq.asq_last_status);
6705                         ret = -ENOENT;
6706                         goto err;
6707                 }
6708                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6709                 vsi->info.valid_sections = 0;
6710                 vsi->seid = ctxt.seid;
6711                 vsi->id = ctxt.vsi_number;
6712         }
6713
6714         /* If macvlan filters already exist, force them to get loaded */
6715         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
6716                 f->changed = true;
6717                 f_count++;
6718         }
6719         if (f_count) {
6720                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
6721                 pf->flags |= I40E_FLAG_FILTER_SYNC;
6722         }
6723
6724         /* Update VSI BW information */
6725         ret = i40e_vsi_get_bw_info(vsi);
6726         if (ret) {
6727                 dev_info(&pf->pdev->dev,
6728                          "couldn't get vsi bw info, err %d, aq_err %d\n",
6729                          ret, pf->hw.aq.asq_last_status);
6730                 /* VSI is already added so not tearing that up */
6731                 ret = 0;
6732         }
6733
6734 err:
6735         return ret;
6736 }
6737
6738 /**
6739  * i40e_vsi_release - Delete a VSI and free its resources
6740  * @vsi: the VSI being removed
6741  *
6742  * Returns 0 on success or < 0 on error
6743  **/
6744 int i40e_vsi_release(struct i40e_vsi *vsi)
6745 {
6746         struct i40e_mac_filter *f, *ftmp;
6747         struct i40e_veb *veb = NULL;
6748         struct i40e_pf *pf;
6749         u16 uplink_seid;
6750         int i, n;
6751
6752         pf = vsi->back;
6753
6754         /* release of a VEB-owner or last VSI is not allowed */
6755         if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
6756                 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
6757                          vsi->seid, vsi->uplink_seid);
6758                 return -ENODEV;
6759         }
6760         if (vsi == pf->vsi[pf->lan_vsi] &&
6761             !test_bit(__I40E_DOWN, &pf->state)) {
6762                 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
6763                 return -ENODEV;
6764         }
6765
6766         uplink_seid = vsi->uplink_seid;
6767         if (vsi->type != I40E_VSI_SRIOV) {
6768                 if (vsi->netdev_registered) {
6769                         vsi->netdev_registered = false;
6770                         if (vsi->netdev) {
6771                                 /* results in a call to i40e_close() */
6772                                 unregister_netdev(vsi->netdev);
6773                                 free_netdev(vsi->netdev);
6774                                 vsi->netdev = NULL;
6775                         }
6776                 } else {
6777                         if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
6778                                 i40e_down(vsi);
6779                         i40e_vsi_free_irq(vsi);
6780                         i40e_vsi_free_tx_resources(vsi);
6781                         i40e_vsi_free_rx_resources(vsi);
6782                 }
6783                 i40e_vsi_disable_irq(vsi);
6784         }
6785
6786         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
6787                 i40e_del_filter(vsi, f->macaddr, f->vlan,
6788                                 f->is_vf, f->is_netdev);
6789         i40e_sync_vsi_filters(vsi);
6790
6791         i40e_vsi_delete(vsi);
6792         i40e_vsi_free_q_vectors(vsi);
6793         i40e_vsi_clear_rings(vsi);
6794         i40e_vsi_clear(vsi);
6795
6796         /* If this was the last thing on the VEB, except for the
6797          * controlling VSI, remove the VEB, which puts the controlling
6798          * VSI onto the next level down in the switch.
6799          *
6800          * Well, okay, there's one more exception here: don't remove
6801          * the orphan VEBs yet.  We'll wait for an explicit remove request
6802          * from up the network stack.
6803          */
6804         for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6805                 if (pf->vsi[i] &&
6806                     pf->vsi[i]->uplink_seid == uplink_seid &&
6807                     (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6808                         n++;      /* count the VSIs */
6809                 }
6810         }
6811         for (i = 0; i < I40E_MAX_VEB; i++) {
6812                 if (!pf->veb[i])
6813                         continue;
6814                 if (pf->veb[i]->uplink_seid == uplink_seid)
6815                         n++;     /* count the VEBs */
6816                 if (pf->veb[i]->seid == uplink_seid)
6817                         veb = pf->veb[i];
6818         }
6819         if (n == 0 && veb && veb->uplink_seid != 0)
6820                 i40e_veb_release(veb);
6821
6822         return 0;
6823 }
6824
6825 /**
6826  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
6827  * @vsi: ptr to the VSI
6828  *
6829  * This should only be called after i40e_vsi_mem_alloc() which allocates the
6830  * corresponding SW VSI structure and initializes num_queue_pairs for the
6831  * newly allocated VSI.
6832  *
6833  * Returns 0 on success or negative on failure
6834  **/
6835 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6836 {
6837         int ret = -ENOENT;
6838         struct i40e_pf *pf = vsi->back;
6839
6840         if (vsi->q_vectors[0]) {
6841                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
6842                          vsi->seid);
6843                 return -EEXIST;
6844         }
6845
6846         if (vsi->base_vector) {
6847                 dev_info(&pf->pdev->dev,
6848                          "VSI %d has non-zero base vector %d\n",
6849                          vsi->seid, vsi->base_vector);
6850                 return -EEXIST;
6851         }
6852
6853         ret = i40e_alloc_q_vectors(vsi);
6854         if (ret) {
6855                 dev_info(&pf->pdev->dev,
6856                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
6857                          vsi->num_q_vectors, vsi->seid, ret);
6858                 vsi->num_q_vectors = 0;
6859                 goto vector_setup_out;
6860         }
6861
6862         if (vsi->num_q_vectors)
6863                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6864                                                  vsi->num_q_vectors, vsi->idx);
6865         if (vsi->base_vector < 0) {
6866                 dev_info(&pf->pdev->dev,
6867                          "failed to get q tracking for VSI %d, err=%d\n",
6868                          vsi->seid, vsi->base_vector);
6869                 i40e_vsi_free_q_vectors(vsi);
6870                 ret = -ENOENT;
6871                 goto vector_setup_out;
6872         }
6873
6874 vector_setup_out:
6875         return ret;
6876 }
6877
6878 /**
6879  * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
6880  * @vsi: pointer to the vsi.
6881  *
6882  * This re-allocates a vsi's queue resources.
6883  *
6884  * Returns pointer to the successfully allocated and configured VSI sw struct
6885  * on success, otherwise returns NULL on failure.
6886  **/
6887 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
6888 {
6889         struct i40e_pf *pf = vsi->back;
6890         u8 enabled_tc;
6891         int ret;
6892
6893         i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6894         i40e_vsi_clear_rings(vsi);
6895
6896         i40e_vsi_free_arrays(vsi, false);
6897         i40e_set_num_rings_in_vsi(vsi);
6898         ret = i40e_vsi_alloc_arrays(vsi, false);
6899         if (ret)
6900                 goto err_vsi;
6901
6902         ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6903         if (ret < 0) {
6904                 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6905                          vsi->seid, ret);
6906                 goto err_vsi;
6907         }
6908         vsi->base_queue = ret;
6909
6910         /* Update the FW view of the VSI. Force a reset of TC and queue
6911          * layout configurations.
6912          */
6913         enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6914         pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6915         pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6916         i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6917
6918         /* assign it some queues */
6919         ret = i40e_alloc_rings(vsi);
6920         if (ret)
6921                 goto err_rings;
6922
6923         /* map all of the rings to the q_vectors */
6924         i40e_vsi_map_rings_to_vectors(vsi);
6925         return vsi;
6926
6927 err_rings:
6928         i40e_vsi_free_q_vectors(vsi);
6929         if (vsi->netdev_registered) {
6930                 vsi->netdev_registered = false;
6931                 unregister_netdev(vsi->netdev);
6932                 free_netdev(vsi->netdev);
6933                 vsi->netdev = NULL;
6934         }
6935         i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6936 err_vsi:
6937         i40e_vsi_clear(vsi);
6938         return NULL;
6939 }
6940
6941 /**
6942  * i40e_vsi_setup - Set up a VSI by a given type
6943  * @pf: board private structure
6944  * @type: VSI type
6945  * @uplink_seid: the switch element to link to
6946  * @param1: usage depends upon VSI type. For VF types, indicates VF id
6947  *
6948  * This allocates the sw VSI structure and its queue resources, then add a VSI
6949  * to the identified VEB.
6950  *
6951  * Returns pointer to the successfully allocated and configure VSI sw struct on
6952  * success, otherwise returns NULL on failure.
6953  **/
6954 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6955                                 u16 uplink_seid, u32 param1)
6956 {
6957         struct i40e_vsi *vsi = NULL;
6958         struct i40e_veb *veb = NULL;
6959         int ret, i;
6960         int v_idx;
6961
6962         /* The requested uplink_seid must be either
6963          *     - the PF's port seid
6964          *              no VEB is needed because this is the PF
6965          *              or this is a Flow Director special case VSI
6966          *     - seid of an existing VEB
6967          *     - seid of a VSI that owns an existing VEB
6968          *     - seid of a VSI that doesn't own a VEB
6969          *              a new VEB is created and the VSI becomes the owner
6970          *     - seid of the PF VSI, which is what creates the first VEB
6971          *              this is a special case of the previous
6972          *
6973          * Find which uplink_seid we were given and create a new VEB if needed
6974          */
6975         for (i = 0; i < I40E_MAX_VEB; i++) {
6976                 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6977                         veb = pf->veb[i];
6978                         break;
6979                 }
6980         }
6981
6982         if (!veb && uplink_seid != pf->mac_seid) {
6983
6984                 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6985                         if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6986                                 vsi = pf->vsi[i];
6987                                 break;
6988                         }
6989                 }
6990                 if (!vsi) {
6991                         dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6992                                  uplink_seid);
6993                         return NULL;
6994                 }
6995
6996                 if (vsi->uplink_seid == pf->mac_seid)
6997                         veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6998                                              vsi->tc_config.enabled_tc);
6999                 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
7000                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
7001                                              vsi->tc_config.enabled_tc);
7002
7003                 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
7004                         if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
7005                                 veb = pf->veb[i];
7006                 }
7007                 if (!veb) {
7008                         dev_info(&pf->pdev->dev, "couldn't add VEB\n");
7009                         return NULL;
7010                 }
7011
7012                 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7013                 uplink_seid = veb->seid;
7014         }
7015
7016         /* get vsi sw struct */
7017         v_idx = i40e_vsi_mem_alloc(pf, type);
7018         if (v_idx < 0)
7019                 goto err_alloc;
7020         vsi = pf->vsi[v_idx];
7021         if (!vsi)
7022                 goto err_alloc;
7023         vsi->type = type;
7024         vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
7025
7026         if (type == I40E_VSI_MAIN)
7027                 pf->lan_vsi = v_idx;
7028         else if (type == I40E_VSI_SRIOV)
7029                 vsi->vf_id = param1;
7030         /* assign it some queues */
7031         ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
7032                                 vsi->idx);
7033         if (ret < 0) {
7034                 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
7035                          vsi->seid, ret);
7036                 goto err_vsi;
7037         }
7038         vsi->base_queue = ret;
7039
7040         /* get a VSI from the hardware */
7041         vsi->uplink_seid = uplink_seid;
7042         ret = i40e_add_vsi(vsi);
7043         if (ret)
7044                 goto err_vsi;
7045
7046         switch (vsi->type) {
7047         /* setup the netdev if needed */
7048         case I40E_VSI_MAIN:
7049         case I40E_VSI_VMDQ2:
7050                 ret = i40e_config_netdev(vsi);
7051                 if (ret)
7052                         goto err_netdev;
7053                 ret = register_netdev(vsi->netdev);
7054                 if (ret)
7055                         goto err_netdev;
7056                 vsi->netdev_registered = true;
7057                 netif_carrier_off(vsi->netdev);
7058 #ifdef CONFIG_I40E_DCB
7059                 /* Setup DCB netlink interface */
7060                 i40e_dcbnl_setup(vsi);
7061 #endif /* CONFIG_I40E_DCB */
7062                 /* fall through */
7063
7064         case I40E_VSI_FDIR:
7065                 /* set up vectors and rings if needed */
7066                 ret = i40e_vsi_setup_vectors(vsi);
7067                 if (ret)
7068                         goto err_msix;
7069
7070                 ret = i40e_alloc_rings(vsi);
7071                 if (ret)
7072                         goto err_rings;
7073
7074                 /* map all of the rings to the q_vectors */
7075                 i40e_vsi_map_rings_to_vectors(vsi);
7076
7077                 i40e_vsi_reset_stats(vsi);
7078                 break;
7079
7080         default:
7081                 /* no netdev or rings for the other VSI types */
7082                 break;
7083         }
7084
7085         return vsi;
7086
7087 err_rings:
7088         i40e_vsi_free_q_vectors(vsi);
7089 err_msix:
7090         if (vsi->netdev_registered) {
7091                 vsi->netdev_registered = false;
7092                 unregister_netdev(vsi->netdev);
7093                 free_netdev(vsi->netdev);
7094                 vsi->netdev = NULL;
7095         }
7096 err_netdev:
7097         i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
7098 err_vsi:
7099         i40e_vsi_clear(vsi);
7100 err_alloc:
7101         return NULL;
7102 }
7103
7104 /**
7105  * i40e_veb_get_bw_info - Query VEB BW information
7106  * @veb: the veb to query
7107  *
7108  * Query the Tx scheduler BW configuration data for given VEB
7109  **/
7110 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
7111 {
7112         struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
7113         struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
7114         struct i40e_pf *pf = veb->pf;
7115         struct i40e_hw *hw = &pf->hw;
7116         u32 tc_bw_max;
7117         int ret = 0;
7118         int i;
7119
7120         ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
7121                                                   &bw_data, NULL);
7122         if (ret) {
7123                 dev_info(&pf->pdev->dev,
7124                          "query veb bw config failed, aq_err=%d\n",
7125                          hw->aq.asq_last_status);
7126                 goto out;
7127         }
7128
7129         ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
7130                                                    &ets_data, NULL);
7131         if (ret) {
7132                 dev_info(&pf->pdev->dev,
7133                          "query veb bw ets config failed, aq_err=%d\n",
7134                          hw->aq.asq_last_status);
7135                 goto out;
7136         }
7137
7138         veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
7139         veb->bw_max_quanta = ets_data.tc_bw_max;
7140         veb->is_abs_credits = bw_data.absolute_credits_enable;
7141         tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
7142                     (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
7143         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
7144                 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
7145                 veb->bw_tc_limit_credits[i] =
7146                                         le16_to_cpu(bw_data.tc_bw_limits[i]);
7147                 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
7148         }
7149
7150 out:
7151         return ret;
7152 }
7153
7154 /**
7155  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
7156  * @pf: board private structure
7157  *
7158  * On error: returns error code (negative)
7159  * On success: returns vsi index in PF (positive)
7160  **/
7161 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
7162 {
7163         int ret = -ENOENT;
7164         struct i40e_veb *veb;
7165         int i;
7166
7167         /* Need to protect the allocation of switch elements at the PF level */
7168         mutex_lock(&pf->switch_mutex);
7169
7170         /* VEB list may be fragmented if VEB creation/destruction has
7171          * been happening.  We can afford to do a quick scan to look
7172          * for any free slots in the list.
7173          *
7174          * find next empty veb slot, looping back around if necessary
7175          */
7176         i = 0;
7177         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
7178                 i++;
7179         if (i >= I40E_MAX_VEB) {
7180                 ret = -ENOMEM;
7181                 goto err_alloc_veb;  /* out of VEB slots! */
7182         }
7183
7184         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
7185         if (!veb) {
7186                 ret = -ENOMEM;
7187                 goto err_alloc_veb;
7188         }
7189         veb->pf = pf;
7190         veb->idx = i;
7191         veb->enabled_tc = 1;
7192
7193         pf->veb[i] = veb;
7194         ret = i;
7195 err_alloc_veb:
7196         mutex_unlock(&pf->switch_mutex);
7197         return ret;
7198 }
7199
7200 /**
7201  * i40e_switch_branch_release - Delete a branch of the switch tree
7202  * @branch: where to start deleting
7203  *
7204  * This uses recursion to find the tips of the branch to be
7205  * removed, deleting until we get back to and can delete this VEB.
7206  **/
7207 static void i40e_switch_branch_release(struct i40e_veb *branch)
7208 {
7209         struct i40e_pf *pf = branch->pf;
7210         u16 branch_seid = branch->seid;
7211         u16 veb_idx = branch->idx;
7212         int i;
7213
7214         /* release any VEBs on this VEB - RECURSION */
7215         for (i = 0; i < I40E_MAX_VEB; i++) {
7216                 if (!pf->veb[i])
7217                         continue;
7218                 if (pf->veb[i]->uplink_seid == branch->seid)
7219                         i40e_switch_branch_release(pf->veb[i]);
7220         }
7221
7222         /* Release the VSIs on this VEB, but not the owner VSI.
7223          *
7224          * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
7225          *       the VEB itself, so don't use (*branch) after this loop.
7226          */
7227         for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7228                 if (!pf->vsi[i])
7229                         continue;
7230                 if (pf->vsi[i]->uplink_seid == branch_seid &&
7231                    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
7232                         i40e_vsi_release(pf->vsi[i]);
7233                 }
7234         }
7235
7236         /* There's one corner case where the VEB might not have been
7237          * removed, so double check it here and remove it if needed.
7238          * This case happens if the veb was created from the debugfs
7239          * commands and no VSIs were added to it.
7240          */
7241         if (pf->veb[veb_idx])
7242                 i40e_veb_release(pf->veb[veb_idx]);
7243 }
7244
7245 /**
7246  * i40e_veb_clear - remove veb struct
7247  * @veb: the veb to remove
7248  **/
7249 static void i40e_veb_clear(struct i40e_veb *veb)
7250 {
7251         if (!veb)
7252                 return;
7253
7254         if (veb->pf) {
7255                 struct i40e_pf *pf = veb->pf;
7256
7257                 mutex_lock(&pf->switch_mutex);
7258                 if (pf->veb[veb->idx] == veb)
7259                         pf->veb[veb->idx] = NULL;
7260                 mutex_unlock(&pf->switch_mutex);
7261         }
7262
7263         kfree(veb);
7264 }
7265
7266 /**
7267  * i40e_veb_release - Delete a VEB and free its resources
7268  * @veb: the VEB being removed
7269  **/
7270 void i40e_veb_release(struct i40e_veb *veb)
7271 {
7272         struct i40e_vsi *vsi = NULL;
7273         struct i40e_pf *pf;
7274         int i, n = 0;
7275
7276         pf = veb->pf;
7277
7278         /* find the remaining VSI and check for extras */
7279         for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7280                 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
7281                         n++;
7282                         vsi = pf->vsi[i];
7283                 }
7284         }
7285         if (n != 1) {
7286                 dev_info(&pf->pdev->dev,
7287                          "can't remove VEB %d with %d VSIs left\n",
7288                          veb->seid, n);
7289                 return;
7290         }
7291
7292         /* move the remaining VSI to uplink veb */
7293         vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
7294         if (veb->uplink_seid) {
7295                 vsi->uplink_seid = veb->uplink_seid;
7296                 if (veb->uplink_seid == pf->mac_seid)
7297                         vsi->veb_idx = I40E_NO_VEB;
7298                 else
7299                         vsi->veb_idx = veb->veb_idx;
7300         } else {
7301                 /* floating VEB */
7302                 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
7303                 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
7304         }
7305
7306         i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
7307         i40e_veb_clear(veb);
7308
7309         return;
7310 }
7311
7312 /**
7313  * i40e_add_veb - create the VEB in the switch
7314  * @veb: the VEB to be instantiated
7315  * @vsi: the controlling VSI
7316  **/
7317 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
7318 {
7319         bool is_default = false;
7320         bool is_cloud = false;
7321         int ret;
7322
7323         /* get a VEB from the hardware */
7324         ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
7325                               veb->enabled_tc, is_default,
7326                               is_cloud, &veb->seid, NULL);
7327         if (ret) {
7328                 dev_info(&veb->pf->pdev->dev,
7329                          "couldn't add VEB, err %d, aq_err %d\n",
7330                          ret, veb->pf->hw.aq.asq_last_status);
7331                 return -EPERM;
7332         }
7333
7334         /* get statistics counter */
7335         ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
7336                                          &veb->stats_idx, NULL, NULL, NULL);
7337         if (ret) {
7338                 dev_info(&veb->pf->pdev->dev,
7339                          "couldn't get VEB statistics idx, err %d, aq_err %d\n",
7340                          ret, veb->pf->hw.aq.asq_last_status);
7341                 return -EPERM;
7342         }
7343         ret = i40e_veb_get_bw_info(veb);
7344         if (ret) {
7345                 dev_info(&veb->pf->pdev->dev,
7346                          "couldn't get VEB bw info, err %d, aq_err %d\n",
7347                          ret, veb->pf->hw.aq.asq_last_status);
7348                 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
7349                 return -ENOENT;
7350         }
7351
7352         vsi->uplink_seid = veb->seid;
7353         vsi->veb_idx = veb->idx;
7354         vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
7355
7356         return 0;
7357 }
7358
7359 /**
7360  * i40e_veb_setup - Set up a VEB
7361  * @pf: board private structure
7362  * @flags: VEB setup flags
7363  * @uplink_seid: the switch element to link to
7364  * @vsi_seid: the initial VSI seid
7365  * @enabled_tc: Enabled TC bit-map
7366  *
7367  * This allocates the sw VEB structure and links it into the switch
7368  * It is possible and legal for this to be a duplicate of an already
7369  * existing VEB.  It is also possible for both uplink and vsi seids
7370  * to be zero, in order to create a floating VEB.
7371  *
7372  * Returns pointer to the successfully allocated VEB sw struct on
7373  * success, otherwise returns NULL on failure.
7374  **/
7375 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
7376                                 u16 uplink_seid, u16 vsi_seid,
7377                                 u8 enabled_tc)
7378 {
7379         struct i40e_veb *veb, *uplink_veb = NULL;
7380         int vsi_idx, veb_idx;
7381         int ret;
7382
7383         /* if one seid is 0, the other must be 0 to create a floating relay */
7384         if ((uplink_seid == 0 || vsi_seid == 0) &&
7385             (uplink_seid + vsi_seid != 0)) {
7386                 dev_info(&pf->pdev->dev,
7387                          "one, not both seid's are 0: uplink=%d vsi=%d\n",
7388                          uplink_seid, vsi_seid);
7389                 return NULL;
7390         }
7391
7392         /* make sure there is such a vsi and uplink */
7393         for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
7394                 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
7395                         break;
7396         if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
7397                 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
7398                          vsi_seid);
7399                 return NULL;
7400         }
7401
7402         if (uplink_seid && uplink_seid != pf->mac_seid) {
7403                 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
7404                         if (pf->veb[veb_idx] &&
7405                             pf->veb[veb_idx]->seid == uplink_seid) {
7406                                 uplink_veb = pf->veb[veb_idx];
7407                                 break;
7408                         }
7409                 }
7410                 if (!uplink_veb) {
7411                         dev_info(&pf->pdev->dev,
7412                                  "uplink seid %d not found\n", uplink_seid);
7413                         return NULL;
7414                 }
7415         }
7416
7417         /* get veb sw struct */
7418         veb_idx = i40e_veb_mem_alloc(pf);
7419         if (veb_idx < 0)
7420                 goto err_alloc;
7421         veb = pf->veb[veb_idx];
7422         veb->flags = flags;
7423         veb->uplink_seid = uplink_seid;
7424         veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
7425         veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
7426
7427         /* create the VEB in the switch */
7428         ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7429         if (ret)
7430                 goto err_veb;
7431
7432         return veb;
7433
7434 err_veb:
7435         i40e_veb_clear(veb);
7436 err_alloc:
7437         return NULL;
7438 }
7439
7440 /**
7441  * i40e_setup_pf_switch_element - set pf vars based on switch type
7442  * @pf: board private structure
7443  * @ele: element we are building info from
7444  * @num_reported: total number of elements
7445  * @printconfig: should we print the contents
7446  *
7447  * helper function to assist in extracting a few useful SEID values.
7448  **/
7449 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
7450                                 struct i40e_aqc_switch_config_element_resp *ele,
7451                                 u16 num_reported, bool printconfig)
7452 {
7453         u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
7454         u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
7455         u8 element_type = ele->element_type;
7456         u16 seid = le16_to_cpu(ele->seid);
7457
7458         if (printconfig)
7459                 dev_info(&pf->pdev->dev,
7460                          "type=%d seid=%d uplink=%d downlink=%d\n",
7461                          element_type, seid, uplink_seid, downlink_seid);
7462
7463         switch (element_type) {
7464         case I40E_SWITCH_ELEMENT_TYPE_MAC:
7465                 pf->mac_seid = seid;
7466                 break;
7467         case I40E_SWITCH_ELEMENT_TYPE_VEB:
7468                 /* Main VEB? */
7469                 if (uplink_seid != pf->mac_seid)
7470                         break;
7471                 if (pf->lan_veb == I40E_NO_VEB) {
7472                         int v;
7473
7474                         /* find existing or else empty VEB */
7475                         for (v = 0; v < I40E_MAX_VEB; v++) {
7476                                 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
7477                                         pf->lan_veb = v;
7478                                         break;
7479                                 }
7480                         }
7481                         if (pf->lan_veb == I40E_NO_VEB) {
7482                                 v = i40e_veb_mem_alloc(pf);
7483                                 if (v < 0)
7484                                         break;
7485                                 pf->lan_veb = v;
7486                         }
7487                 }
7488
7489                 pf->veb[pf->lan_veb]->seid = seid;
7490                 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
7491                 pf->veb[pf->lan_veb]->pf = pf;
7492                 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
7493                 break;
7494         case I40E_SWITCH_ELEMENT_TYPE_VSI:
7495                 if (num_reported != 1)
7496                         break;
7497                 /* This is immediately after a reset so we can assume this is
7498                  * the PF's VSI
7499                  */
7500                 pf->mac_seid = uplink_seid;
7501                 pf->pf_seid = downlink_seid;
7502                 pf->main_vsi_seid = seid;
7503                 if (printconfig)
7504                         dev_info(&pf->pdev->dev,
7505                                  "pf_seid=%d main_vsi_seid=%d\n",
7506                                  pf->pf_seid, pf->main_vsi_seid);
7507                 break;
7508         case I40E_SWITCH_ELEMENT_TYPE_PF:
7509         case I40E_SWITCH_ELEMENT_TYPE_VF:
7510         case I40E_SWITCH_ELEMENT_TYPE_EMP:
7511         case I40E_SWITCH_ELEMENT_TYPE_BMC:
7512         case I40E_SWITCH_ELEMENT_TYPE_PE:
7513         case I40E_SWITCH_ELEMENT_TYPE_PA:
7514                 /* ignore these for now */
7515                 break;
7516         default:
7517                 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
7518                          element_type, seid);
7519                 break;
7520         }
7521 }
7522
7523 /**
7524  * i40e_fetch_switch_configuration - Get switch config from firmware
7525  * @pf: board private structure
7526  * @printconfig: should we print the contents
7527  *
7528  * Get the current switch configuration from the device and
7529  * extract a few useful SEID values.
7530  **/
7531 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7532 {
7533         struct i40e_aqc_get_switch_config_resp *sw_config;
7534         u16 next_seid = 0;
7535         int ret = 0;
7536         u8 *aq_buf;
7537         int i;
7538
7539         aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
7540         if (!aq_buf)
7541                 return -ENOMEM;
7542
7543         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
7544         do {
7545                 u16 num_reported, num_total;
7546
7547                 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
7548                                                 I40E_AQ_LARGE_BUF,
7549                                                 &next_seid, NULL);
7550                 if (ret) {
7551                         dev_info(&pf->pdev->dev,
7552                                  "get switch config failed %d aq_err=%x\n",
7553                                  ret, pf->hw.aq.asq_last_status);
7554                         kfree(aq_buf);
7555                         return -ENOENT;
7556                 }
7557
7558                 num_reported = le16_to_cpu(sw_config->header.num_reported);
7559                 num_total = le16_to_cpu(sw_config->header.num_total);
7560
7561                 if (printconfig)
7562                         dev_info(&pf->pdev->dev,
7563                                  "header: %d reported %d total\n",
7564                                  num_reported, num_total);
7565
7566                 if (num_reported) {
7567                         int sz = sizeof(*sw_config) * num_reported;
7568
7569                         kfree(pf->sw_config);
7570                         pf->sw_config = kzalloc(sz, GFP_KERNEL);
7571                         if (pf->sw_config)
7572                                 memcpy(pf->sw_config, sw_config, sz);
7573                 }
7574
7575                 for (i = 0; i < num_reported; i++) {
7576                         struct i40e_aqc_switch_config_element_resp *ele =
7577                                 &sw_config->element[i];
7578
7579                         i40e_setup_pf_switch_element(pf, ele, num_reported,
7580                                                      printconfig);
7581                 }
7582         } while (next_seid != 0);
7583
7584         kfree(aq_buf);
7585         return ret;
7586 }
7587
7588 /**
7589  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
7590  * @pf: board private structure
7591  * @reinit: if the Main VSI needs to re-initialized.
7592  *
7593  * Returns 0 on success, negative value on failure
7594  **/
7595 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
7596 {
7597         u32 rxfc = 0, txfc = 0, rxfc_reg;
7598         int ret;
7599
7600         /* find out what's out there already */
7601         ret = i40e_fetch_switch_configuration(pf, false);
7602         if (ret) {
7603                 dev_info(&pf->pdev->dev,
7604                          "couldn't fetch switch config, err %d, aq_err %d\n",
7605                          ret, pf->hw.aq.asq_last_status);
7606                 return ret;
7607         }
7608         i40e_pf_reset_stats(pf);
7609
7610         /* first time setup */
7611         if (pf->lan_vsi == I40E_NO_VSI || reinit) {
7612                 struct i40e_vsi *vsi = NULL;
7613                 u16 uplink_seid;
7614
7615                 /* Set up the PF VSI associated with the PF's main VSI
7616                  * that is already in the HW switch
7617                  */
7618                 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
7619                         uplink_seid = pf->veb[pf->lan_veb]->seid;
7620                 else
7621                         uplink_seid = pf->mac_seid;
7622                 if (pf->lan_vsi == I40E_NO_VSI)
7623                         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
7624                 else if (reinit)
7625                         vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
7626                 if (!vsi) {
7627                         dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
7628                         i40e_fdir_teardown(pf);
7629                         return -EAGAIN;
7630                 }
7631         } else {
7632                 /* force a reset of TC and queue layout configurations */
7633                 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7634                 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7635                 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7636                 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7637         }
7638         i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
7639
7640         i40e_fdir_sb_setup(pf);
7641
7642         /* Setup static PF queue filter control settings */
7643         ret = i40e_setup_pf_filter_control(pf);
7644         if (ret) {
7645                 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
7646                          ret);
7647                 /* Failure here should not stop continuing other steps */
7648         }
7649
7650         /* enable RSS in the HW, even for only one queue, as the stack can use
7651          * the hash
7652          */
7653         if ((pf->flags & I40E_FLAG_RSS_ENABLED))
7654                 i40e_config_rss(pf);
7655
7656         /* fill in link information and enable LSE reporting */
7657         i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
7658         i40e_link_event(pf);
7659
7660         /* Initialize user-specific link properties */
7661         pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
7662                                   I40E_AQ_AN_COMPLETED) ? true : false);
7663         /* requested_mode is set in probe or by ethtool */
7664         if (!pf->fc_autoneg_status)
7665                 goto no_autoneg;
7666
7667         if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
7668             (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
7669                 pf->hw.fc.current_mode = I40E_FC_FULL;
7670         else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
7671                 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
7672         else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
7673                 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
7674         else
7675                 pf->hw.fc.current_mode = I40E_FC_NONE;
7676
7677         /* sync the flow control settings with the auto-neg values */
7678         switch (pf->hw.fc.current_mode) {
7679         case I40E_FC_FULL:
7680                 txfc = 1;
7681                 rxfc = 1;
7682                 break;
7683         case I40E_FC_TX_PAUSE:
7684                 txfc = 1;
7685                 rxfc = 0;
7686                 break;
7687         case I40E_FC_RX_PAUSE:
7688                 txfc = 0;
7689                 rxfc = 1;
7690                 break;
7691         case I40E_FC_NONE:
7692         case I40E_FC_DEFAULT:
7693                 txfc = 0;
7694                 rxfc = 0;
7695                 break;
7696         case I40E_FC_PFC:
7697                 /* TBD */
7698                 break;
7699         /* no default case, we have to handle all possibilities here */
7700         }
7701
7702         wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
7703
7704         rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7705                    ~I40E_PRTDCB_MFLCN_RFCE_MASK;
7706         rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
7707
7708         wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
7709
7710         goto fc_complete;
7711
7712 no_autoneg:
7713         /* disable L2 flow control, user can turn it on if they wish */
7714         wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
7715         wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7716                                          ~I40E_PRTDCB_MFLCN_RFCE_MASK);
7717
7718 fc_complete:
7719         i40e_ptp_init(pf);
7720
7721         return ret;
7722 }
7723
7724 /**
7725  * i40e_determine_queue_usage - Work out queue distribution
7726  * @pf: board private structure
7727  **/
7728 static void i40e_determine_queue_usage(struct i40e_pf *pf)
7729 {
7730         int queues_left;
7731
7732         pf->num_lan_qps = 0;
7733
7734         /* Find the max queues to be put into basic use.  We'll always be
7735          * using TC0, whether or not DCB is running, and TC0 will get the
7736          * big RSS set.
7737          */
7738         queues_left = pf->hw.func_caps.num_tx_qp;
7739
7740         if ((queues_left == 1) ||
7741             !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
7742             !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
7743                            I40E_FLAG_DCB_ENABLED))) {
7744                 /* one qp for PF, no queues for anything else */
7745                 queues_left = 0;
7746                 pf->rss_size = pf->num_lan_qps = 1;
7747
7748                 /* make sure all the fancies are disabled */
7749                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
7750                                I40E_FLAG_FD_SB_ENABLED  |
7751                                I40E_FLAG_FD_ATR_ENABLED |
7752                                I40E_FLAG_DCB_ENABLED    |
7753                                I40E_FLAG_SRIOV_ENABLED  |
7754                                I40E_FLAG_VMDQ_ENABLED);
7755         } else {
7756                 /* Not enough queues for all TCs */
7757                 if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
7758                     (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
7759                         pf->flags &= ~I40E_FLAG_DCB_ENABLED;
7760                         dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
7761                 }
7762                 pf->num_lan_qps = pf->rss_size_max;
7763                 queues_left -= pf->num_lan_qps;
7764         }
7765
7766         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7767                 if (queues_left > 1) {
7768                         queues_left -= 1; /* save 1 queue for FD */
7769                 } else {
7770                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7771                         dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
7772                 }
7773         }
7774
7775         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7776             pf->num_vf_qps && pf->num_req_vfs && queues_left) {
7777                 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
7778                                         (queues_left / pf->num_vf_qps));
7779                 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
7780         }
7781
7782         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7783             pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
7784                 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
7785                                           (queues_left / pf->num_vmdq_qps));
7786                 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
7787         }
7788
7789         pf->queues_left = queues_left;
7790         return;
7791 }
7792
7793 /**
7794  * i40e_setup_pf_filter_control - Setup PF static filter control
7795  * @pf: PF to be setup
7796  *
7797  * i40e_setup_pf_filter_control sets up a pf's initial filter control
7798  * settings. If PE/FCoE are enabled then it will also set the per PF
7799  * based filter sizes required for them. It also enables Flow director,
7800  * ethertype and macvlan type filter settings for the pf.
7801  *
7802  * Returns 0 on success, negative on failure
7803  **/
7804 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
7805 {
7806         struct i40e_filter_control_settings *settings = &pf->filter_settings;
7807
7808         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
7809
7810         /* Flow Director is enabled */
7811         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
7812                 settings->enable_fdir = true;
7813
7814         /* Ethtype and MACVLAN filters enabled for PF */
7815         settings->enable_ethtype = true;
7816         settings->enable_macvlan = true;
7817
7818         if (i40e_set_filter_control(&pf->hw, settings))
7819                 return -ENOENT;
7820
7821         return 0;
7822 }
7823
7824 /**
7825  * i40e_probe - Device initialization routine
7826  * @pdev: PCI device information struct
7827  * @ent: entry in i40e_pci_tbl
7828  *
7829  * i40e_probe initializes a pf identified by a pci_dev structure.
7830  * The OS initialization, configuring of the pf private structure,
7831  * and a hardware reset occur.
7832  *
7833  * Returns 0 on success, negative on failure
7834  **/
7835 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7836 {
7837         struct i40e_driver_version dv;
7838         struct i40e_pf *pf;
7839         struct i40e_hw *hw;
7840         static u16 pfs_found;
7841         u16 link_status;
7842         int err = 0;
7843         u32 len;
7844
7845         err = pci_enable_device_mem(pdev);
7846         if (err)
7847                 return err;
7848
7849         /* set up for high or low dma */
7850         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7851                 /* coherent mask for the same size will always succeed if
7852                  * dma_set_mask does
7853                  */
7854                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
7855         } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7856                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7857         } else {
7858                 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
7859                 err = -EIO;
7860                 goto err_dma;
7861         }
7862
7863         /* set up pci connections */
7864         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
7865                                            IORESOURCE_MEM), i40e_driver_name);
7866         if (err) {
7867                 dev_info(&pdev->dev,
7868                          "pci_request_selected_regions failed %d\n", err);
7869                 goto err_pci_reg;
7870         }
7871
7872         pci_enable_pcie_error_reporting(pdev);
7873         pci_set_master(pdev);
7874
7875         /* Now that we have a PCI connection, we need to do the
7876          * low level device setup.  This is primarily setting up
7877          * the Admin Queue structures and then querying for the
7878          * device's current profile information.
7879          */
7880         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
7881         if (!pf) {
7882                 err = -ENOMEM;
7883                 goto err_pf_alloc;
7884         }
7885         pf->next_vsi = 0;
7886         pf->pdev = pdev;
7887         set_bit(__I40E_DOWN, &pf->state);
7888
7889         hw = &pf->hw;
7890         hw->back = pf;
7891         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
7892                               pci_resource_len(pdev, 0));
7893         if (!hw->hw_addr) {
7894                 err = -EIO;
7895                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
7896                          (unsigned int)pci_resource_start(pdev, 0),
7897                          (unsigned int)pci_resource_len(pdev, 0), err);
7898                 goto err_ioremap;
7899         }
7900         hw->vendor_id = pdev->vendor;
7901         hw->device_id = pdev->device;
7902         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
7903         hw->subsystem_vendor_id = pdev->subsystem_vendor;
7904         hw->subsystem_device_id = pdev->subsystem_device;
7905         hw->bus.device = PCI_SLOT(pdev->devfn);
7906         hw->bus.func = PCI_FUNC(pdev->devfn);
7907         pf->instance = pfs_found;
7908
7909         /* do a special CORER for clearing PXE mode once at init */
7910         if (hw->revision_id == 0 &&
7911             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
7912                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
7913                 i40e_flush(hw);
7914                 msleep(200);
7915                 pf->corer_count++;
7916
7917                 i40e_clear_pxe_mode(hw);
7918         }
7919
7920         /* Reset here to make sure all is clean and to define PF 'n' */
7921         err = i40e_pf_reset(hw);
7922         if (err) {
7923                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
7924                 goto err_pf_reset;
7925         }
7926         pf->pfr_count++;
7927
7928         hw->aq.num_arq_entries = I40E_AQ_LEN;
7929         hw->aq.num_asq_entries = I40E_AQ_LEN;
7930         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7931         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7932         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
7933         snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
7934                  "%s-pf%d:misc",
7935                  dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
7936
7937         err = i40e_init_shared_code(hw);
7938         if (err) {
7939                 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
7940                 goto err_pf_reset;
7941         }
7942
7943         /* set up a default setting for link flow control */
7944         pf->hw.fc.requested_mode = I40E_FC_NONE;
7945
7946         err = i40e_init_adminq(hw);
7947         dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7948         if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
7949                  >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
7950                 dev_info(&pdev->dev,
7951                          "warning: NVM version not supported, supported version: %02x.%02x\n",
7952                          I40E_CURRENT_NVM_VERSION_HI,
7953                          I40E_CURRENT_NVM_VERSION_LO);
7954         }
7955         if (err) {
7956                 dev_info(&pdev->dev,
7957                          "init_adminq failed: %d expecting API %02x.%02x\n",
7958                          err,
7959                          I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
7960                 goto err_pf_reset;
7961         }
7962
7963         i40e_clear_pxe_mode(hw);
7964         err = i40e_get_capabilities(pf);
7965         if (err)
7966                 goto err_adminq_setup;
7967
7968         err = i40e_sw_init(pf);
7969         if (err) {
7970                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7971                 goto err_sw_init;
7972         }
7973
7974         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7975                                 hw->func_caps.num_rx_qp,
7976                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7977         if (err) {
7978                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7979                 goto err_init_lan_hmc;
7980         }
7981
7982         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7983         if (err) {
7984                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7985                 err = -ENOENT;
7986                 goto err_configure_lan_hmc;
7987         }
7988
7989         i40e_get_mac_addr(hw, hw->mac.addr);
7990         if (!is_valid_ether_addr(hw->mac.addr)) {
7991                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7992                 err = -EIO;
7993                 goto err_mac_addr;
7994         }
7995         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7996         memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7997
7998         pci_set_drvdata(pdev, pf);
7999         pci_save_state(pdev);
8000 #ifdef CONFIG_I40E_DCB
8001         err = i40e_init_pf_dcb(pf);
8002         if (err) {
8003                 dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
8004                 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8005                 goto err_init_dcb;
8006         }
8007 #endif /* CONFIG_I40E_DCB */
8008
8009         /* set up periodic task facility */
8010         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
8011         pf->service_timer_period = HZ;
8012
8013         INIT_WORK(&pf->service_task, i40e_service_task);
8014         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
8015         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
8016         pf->link_check_timeout = jiffies;
8017
8018         /* WoL defaults to disabled */
8019         pf->wol_en = false;
8020         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
8021
8022         /* set up the main switch operations */
8023         i40e_determine_queue_usage(pf);
8024         i40e_init_interrupt_scheme(pf);
8025
8026         /* Set up the *vsi struct based on the number of VSIs in the HW,
8027          * and set up our local tracking of the MAIN PF vsi.
8028          */
8029         len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
8030         pf->vsi = kzalloc(len, GFP_KERNEL);
8031         if (!pf->vsi) {
8032                 err = -ENOMEM;
8033                 goto err_switch_setup;
8034         }
8035
8036         err = i40e_setup_pf_switch(pf, false);
8037         if (err) {
8038                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
8039                 goto err_vsis;
8040         }
8041
8042         /* The main driver is (mostly) up and happy. We need to set this state
8043          * before setting up the misc vector or we get a race and the vector
8044          * ends up disabled forever.
8045          */
8046         clear_bit(__I40E_DOWN, &pf->state);
8047
8048         /* In case of MSIX we are going to setup the misc vector right here
8049          * to handle admin queue events etc. In case of legacy and MSI
8050          * the misc functionality and queue processing is combined in
8051          * the same vector and that gets setup at open.
8052          */
8053         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
8054                 err = i40e_setup_misc_vector(pf);
8055                 if (err) {
8056                         dev_info(&pdev->dev,
8057                                  "setup of misc vector failed: %d\n", err);
8058                         goto err_vsis;
8059                 }
8060         }
8061
8062         /* prep for VF support */
8063         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
8064             (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
8065                 u32 val;
8066
8067                 /* disable link interrupts for VFs */
8068                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
8069                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
8070                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
8071                 i40e_flush(hw);
8072         }
8073
8074         pfs_found++;
8075
8076         i40e_dbg_pf_init(pf);
8077
8078         /* tell the firmware that we're starting */
8079         dv.major_version = DRV_VERSION_MAJOR;
8080         dv.minor_version = DRV_VERSION_MINOR;
8081         dv.build_version = DRV_VERSION_BUILD;
8082         dv.subbuild_version = 0;
8083         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
8084
8085         /* since everything's happy, start the service_task timer */
8086         mod_timer(&pf->service_timer,
8087                   round_jiffies(jiffies + pf->service_timer_period));
8088
8089         /* Get the negotiated link width and speed from PCI config space */
8090         pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
8091
8092         i40e_set_pci_config_data(hw, link_status);
8093
8094         dev_info(&pdev->dev, "PCI Express: %s %s\n",
8095                 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
8096                  hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
8097                  hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
8098                  "Unknown"),
8099                 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
8100                  hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
8101                  hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
8102                  hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
8103                  "Unknown"));
8104
8105         if (hw->bus.width < i40e_bus_width_pcie_x8 ||
8106             hw->bus.speed < i40e_bus_speed_8000) {
8107                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
8108                 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
8109         }
8110
8111         return 0;
8112
8113         /* Unwind what we've done if something failed in the setup */
8114 err_vsis:
8115         set_bit(__I40E_DOWN, &pf->state);
8116         i40e_clear_interrupt_scheme(pf);
8117         kfree(pf->vsi);
8118 err_switch_setup:
8119         i40e_reset_interrupt_capability(pf);
8120         del_timer_sync(&pf->service_timer);
8121 #ifdef CONFIG_I40E_DCB
8122 err_init_dcb:
8123 #endif /* CONFIG_I40E_DCB */
8124 err_mac_addr:
8125 err_configure_lan_hmc:
8126         (void)i40e_shutdown_lan_hmc(hw);
8127 err_init_lan_hmc:
8128         kfree(pf->qp_pile);
8129         kfree(pf->irq_pile);
8130 err_sw_init:
8131 err_adminq_setup:
8132         (void)i40e_shutdown_adminq(hw);
8133 err_pf_reset:
8134         iounmap(hw->hw_addr);
8135 err_ioremap:
8136         kfree(pf);
8137 err_pf_alloc:
8138         pci_disable_pcie_error_reporting(pdev);
8139         pci_release_selected_regions(pdev,
8140                                      pci_select_bars(pdev, IORESOURCE_MEM));
8141 err_pci_reg:
8142 err_dma:
8143         pci_disable_device(pdev);
8144         return err;
8145 }
8146
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	i40e_status ret_code;
	u32 reg;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* release any VFs before tearing down the rest of the device */
	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* quiesce and release the misc/admin-queue interrupt vector */
	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	/* shutdown and destroy the HMC */
	ret_code = i40e_shutdown_lan_hmc(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the HMC resources: %d\n", ret_code);

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	/* kfree(NULL) is a no-op, so unconditionally free every VEB slot */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->irq_pile);
	kfree(pf->sw_config);
	kfree(pf->vsi);

	/* force a PF reset to clean anything leftover */
	reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
	wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	i40e_flush(&pf->hw);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
8249
8250 /**
8251  * i40e_pci_error_detected - warning that something funky happened in PCI land
8252  * @pdev: PCI device information struct
8253  *
8254  * Called to warn that something happened and the error handling steps
8255  * are in progress.  Allows the driver to quiesce things, be ready for
8256  * remediation.
8257  **/
8258 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
8259                                                 enum pci_channel_state error)
8260 {
8261         struct i40e_pf *pf = pci_get_drvdata(pdev);
8262
8263         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
8264
8265         /* shutdown all operations */
8266         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
8267                 rtnl_lock();
8268                 i40e_prep_for_reset(pf);
8269                 rtnl_unlock();
8270         }
8271
8272         /* Request a slot reset */
8273         return PCI_ERS_RESULT_NEED_RESET;
8274 }
8275
8276 /**
8277  * i40e_pci_error_slot_reset - a PCI slot reset just happened
8278  * @pdev: PCI device information struct
8279  *
8280  * Called to find if the driver can work with the device now that
8281  * the pci slot has been reset.  If a basic connection seems good
8282  * (registers are readable and have sane content) then return a
8283  * happy little PCI_ERS_RESULT_xxx.
8284  **/
8285 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
8286 {
8287         struct i40e_pf *pf = pci_get_drvdata(pdev);
8288         pci_ers_result_t result;
8289         int err;
8290         u32 reg;
8291
8292         dev_info(&pdev->dev, "%s\n", __func__);
8293         if (pci_enable_device_mem(pdev)) {
8294                 dev_info(&pdev->dev,
8295                          "Cannot re-enable PCI device after reset.\n");
8296                 result = PCI_ERS_RESULT_DISCONNECT;
8297         } else {
8298                 pci_set_master(pdev);
8299                 pci_restore_state(pdev);
8300                 pci_save_state(pdev);
8301                 pci_wake_from_d3(pdev, false);
8302
8303                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
8304                 if (reg == 0)
8305                         result = PCI_ERS_RESULT_RECOVERED;
8306                 else
8307                         result = PCI_ERS_RESULT_DISCONNECT;
8308         }
8309
8310         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8311         if (err) {
8312                 dev_info(&pdev->dev,
8313                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8314                          err);
8315                 /* non-fatal, continue */
8316         }
8317
8318         return result;
8319 }
8320
8321 /**
8322  * i40e_pci_error_resume - restart operations after PCI error recovery
8323  * @pdev: PCI device information struct
8324  *
8325  * Called to allow the driver to bring things back up after PCI error
8326  * and/or reset recovery has finished.
8327  **/
8328 static void i40e_pci_error_resume(struct pci_dev *pdev)
8329 {
8330         struct i40e_pf *pf = pci_get_drvdata(pdev);
8331
8332         dev_info(&pdev->dev, "%s\n", __func__);
8333         if (test_bit(__I40E_SUSPENDED, &pf->state))
8334                 return;
8335
8336         rtnl_lock();
8337         i40e_handle_reset_warning(pf);
8338         rtnl_lock();
8339 }
8340
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 *
 * Quiesces the device and programs its wake state before the system
 * reboots or powers off.
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark the device down so no new work gets scheduled */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* arm or disarm APM and magic-packet wake per the WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* only drop to D3hot on a real power-off, not on a reboot */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
8364
8365 #ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: target PM state (unused; we always enter D3hot)
 *
 * Returns 0 (always succeeds).
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark the device down so no new work gets scheduled;
	 * i40e_resume() will clear these bits on wakeup
	 */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* arm or disarm APM and magic-packet wake per the WoL setting */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
8389
8390 /**
8391  * i40e_resume - PCI callback for waking up from D3
8392  * @pdev: PCI device information struct
8393  **/
8394 static int i40e_resume(struct pci_dev *pdev)
8395 {
8396         struct i40e_pf *pf = pci_get_drvdata(pdev);
8397         u32 err;
8398
8399         pci_set_power_state(pdev, PCI_D0);
8400         pci_restore_state(pdev);
8401         /* pci_restore_state() clears dev->state_saves, so
8402          * call pci_save_state() again to restore it.
8403          */
8404         pci_save_state(pdev);
8405
8406         err = pci_enable_device_mem(pdev);
8407         if (err) {
8408                 dev_err(&pdev->dev,
8409                         "%s: Cannot enable PCI device from suspend\n",
8410                         __func__);
8411                 return err;
8412         }
8413         pci_set_master(pdev);
8414
8415         /* no wakeup events while running */
8416         pci_wake_from_d3(pdev, false);
8417
8418         /* handling the reset will rebuild the device state */
8419         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
8420                 clear_bit(__I40E_DOWN, &pf->state);
8421                 rtnl_lock();
8422                 i40e_reset_and_rebuild(pf, false);
8423                 rtnl_unlock();
8424         }
8425
8426         return 0;
8427 }
8428
8429 #endif
/* PCI error-recovery callbacks invoked by the AER core */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
8435
/* PCI driver registration: probe/remove, PM hooks, error recovery and
 * SR-IOV configuration entry points
 */
static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
8449
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
	/* set up debugfs support before any device can probe */
	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
8465
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	/* unregister first so no device outlives the debugfs support */
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);