i40e: refactor reset code
drivers/net/ethernet/intel/i40e/i40e_main.c [linux-2.6-block.git]
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28/* Local includes */
29#include "i40e.h"
30
31const char i40e_driver_name[] = "i40e";
32static const char i40e_driver_string[] =
33 "Intel(R) Ethernet Connection XL710 Network Driver";
34
35#define DRV_KERN "-k"
36
37#define DRV_VERSION_MAJOR 0
38#define DRV_VERSION_MINOR 3
39#define DRV_VERSION_BUILD 12
40#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
41 __stringify(DRV_VERSION_MINOR) "." \
42 __stringify(DRV_VERSION_BUILD) DRV_KERN
43const char i40e_driver_version_str[] = DRV_VERSION;
44static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
45
46/* forward declarations */
47static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
48static void i40e_handle_reset_warning(struct i40e_pf *pf);
49static int i40e_add_vsi(struct i40e_vsi *vsi);
50static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
51static int i40e_setup_pf_switch(struct i40e_pf *pf);
52static int i40e_setup_misc_vector(struct i40e_pf *pf);
53static void i40e_determine_queue_usage(struct i40e_pf *pf);
54static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
55
56/* i40e_pci_tbl - PCI Device ID Table
57 *
58 * Last entry must be all 0s
59 *
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
61 * Class, Class Mask, private data (not used) }
62 */
63static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
64 {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
65 {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
66 {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
67 {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
68 {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
69 {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
70 {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
71 {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
72 {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
73 {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
74 /* required last entry */
75 {0, }
76};
77MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
78
79#define I40E_MAX_VF_COUNT 128
80static int debug = -1;
81module_param(debug, int, 0);
82MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
83
84MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
85MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(DRV_VERSION);
88
89/**
90 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
91 * @hw: pointer to the HW structure
92 * @mem: ptr to mem struct to fill out
93 * @size: size of memory requested
94 * @alignment: what to align the allocation to
95 **/
96int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
97 u64 size, u32 alignment)
98{
99 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
100
101 mem->size = ALIGN(size, alignment);
102 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
103 &mem->pa, GFP_KERNEL);
104 if (!mem->va)
105 return -ENOMEM;
106
107	return 0;
108}
109
110/**
111 * i40e_free_dma_mem_d - OS specific memory free for shared code
112 * @hw: pointer to the HW structure
113 * @mem: ptr to mem struct to free
114 **/
115int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
116{
117 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
118
119 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
120 mem->va = NULL;
121 mem->pa = 0;
122 mem->size = 0;
123
124 return 0;
125}
126
127/**
128 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
129 * @hw: pointer to the HW structure
130 * @mem: ptr to mem struct to fill out
131 * @size: size of memory requested
132 **/
133int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
134 u32 size)
135{
136 mem->size = size;
137 mem->va = kzalloc(size, GFP_KERNEL);
138
139 if (!mem->va)
140 return -ENOMEM;
141
142	return 0;
143}
144
145/**
146 * i40e_free_virt_mem_d - OS specific memory free for shared code
147 * @hw: pointer to the HW structure
148 * @mem: ptr to mem struct to free
149 **/
150int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
151{
152 /* it's ok to kfree a NULL pointer */
153 kfree(mem->va);
154 mem->va = NULL;
155 mem->size = 0;
156
157 return 0;
158}
159
160/**
161 * i40e_get_lump - find a lump of free generic resource
162 * @pf: board private structure
163 * @pile: the pile of resource to search
164 * @needed: the number of items needed
165 * @id: an owner id to stick on the items assigned
166 *
167 * Returns the base item index of the lump, or negative for error
168 *
169 * The search_hint trick and lack of advanced fit-finding only work
170 * because we're highly likely to have all the same size lump requests.
171 * Linear search time and any fragmentation should be minimal.
172 **/
173static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
174 u16 needed, u16 id)
175{
176 int ret = -ENOMEM;
177	int i, j;
178
179 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
180 dev_info(&pf->pdev->dev,
181 "param err: pile=%p needed=%d id=0x%04x\n",
182 pile, needed, id);
183 return -EINVAL;
184 }
185
186 /* start the linear search with an imperfect hint */
187 i = pile->search_hint;
188	while (i < pile->num_entries) {
189 /* skip already allocated entries */
190 if (pile->list[i] & I40E_PILE_VALID_BIT) {
191 i++;
192 continue;
193 }
194
195 /* do we have enough in this lump? */
196 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
197 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
198 break;
199 }
200
201 if (j == needed) {
202 /* there was enough, so assign it to the requestor */
203 for (j = 0; j < needed; j++)
204 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
205 ret = i;
206 pile->search_hint = i + j;
207			break;
208 } else {
209 /* not enough, so skip over it and continue looking */
210 i += j;
211 }
212 }
213
214 return ret;
215}
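
/* Illustrative usage sketch (not part of this file; the pile name and the
 * exact arguments are assumptions): a VSI would typically reserve a lump of
 * queue pairs from a PF-wide pile and hand it back on teardown.
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 *	if (base < 0)
 *		return base;
 *	vsi->base_queue = base;
 *	...
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 */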
216
217/**
218 * i40e_put_lump - return a lump of generic resource
219 * @pile: the pile of resource to search
220 * @index: the base item index
221 * @id: the owner id of the items assigned
222 *
223 * Returns the count of items in the lump
224 **/
225static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
226{
227 int valid_id = (id | I40E_PILE_VALID_BIT);
228 int count = 0;
229 int i;
230
231 if (!pile || index >= pile->num_entries)
232 return -EINVAL;
233
234 for (i = index;
235 i < pile->num_entries && pile->list[i] == valid_id;
236 i++) {
237 pile->list[i] = 0;
238 count++;
239 }
240
241 if (count && index < pile->search_hint)
242 pile->search_hint = index;
243
244 return count;
245}
246
247/**
248 * i40e_service_event_schedule - Schedule the service task to wake up
249 * @pf: board private structure
250 *
251 * If not already scheduled, this puts the task into the work queue
252 **/
253static void i40e_service_event_schedule(struct i40e_pf *pf)
254{
255 if (!test_bit(__I40E_DOWN, &pf->state) &&
256 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
257 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
258 schedule_work(&pf->service_task);
259}
260
261/**
262 * i40e_tx_timeout - Respond to a Tx Hang
263 * @netdev: network interface device structure
264 *
265 * If any port has noticed a Tx timeout, it is likely that the whole
266 * device is munged, not just the one netdev port, so go for the full
267 * reset.
268 **/
269static void i40e_tx_timeout(struct net_device *netdev)
270{
271 struct i40e_netdev_priv *np = netdev_priv(netdev);
272 struct i40e_vsi *vsi = np->vsi;
273 struct i40e_pf *pf = vsi->back;
274
275 pf->tx_timeout_count++;
276
277 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
278 pf->tx_timeout_recovery_level = 0;
279 pf->tx_timeout_last_recovery = jiffies;
280 netdev_info(netdev, "tx_timeout recovery level %d\n",
281 pf->tx_timeout_recovery_level);
282
283 switch (pf->tx_timeout_recovery_level) {
284 case 0:
285 /* disable and re-enable queues for the VSI */
286 if (in_interrupt()) {
287 set_bit(__I40E_REINIT_REQUESTED, &pf->state);
288 set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
289 } else {
290 i40e_vsi_reinit_locked(vsi);
291 }
292 break;
293 case 1:
294 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
295 break;
296 case 2:
297 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
298 break;
299 case 3:
300 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
301 break;
302 default:
303 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
304 i40e_down(vsi);
305 break;
306 }
307 i40e_service_event_schedule(pf);
308 pf->tx_timeout_recovery_level++;
309}
310
311/**
312 * i40e_release_rx_desc - Store the new tail and head values
313 * @rx_ring: ring to bump
314 * @val: new head index
315 **/
316static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
317{
318 rx_ring->next_to_use = val;
319
320 /* Force memory writes to complete before letting h/w
321 * know there are new descriptors to fetch. (Only
322 * applicable for weak-ordered memory model archs,
323 * such as IA-64).
324 */
325 wmb();
326 writel(val, rx_ring->tail);
327}
328
329/**
330 * i40e_get_vsi_stats_struct - Get System Network Statistics
331 * @vsi: the VSI we care about
332 *
333 * Returns the address of the device statistics structure.
334 * The statistics are actually updated from the service task.
335 **/
336struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
337{
338 return &vsi->net_stats;
339}
340
341/**
342 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
343 * @netdev: network interface device structure
344 *
345 * Returns the address of the device statistics structure.
346 * The statistics are actually updated from the service task.
347 **/
348static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
349 struct net_device *netdev,
350					     struct rtnl_link_stats64 *stats)
351{
352 struct i40e_netdev_priv *np = netdev_priv(netdev);
353 struct i40e_vsi *vsi = np->vsi;
354 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
355 int i;
356
357 rcu_read_lock();
358 for (i = 0; i < vsi->num_queue_pairs; i++) {
359 struct i40e_ring *tx_ring, *rx_ring;
360 u64 bytes, packets;
361 unsigned int start;
362
363 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
364 if (!tx_ring)
365 continue;
366
367 do {
368 start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
369 packets = tx_ring->stats.packets;
370 bytes = tx_ring->stats.bytes;
371 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
372
373 stats->tx_packets += packets;
374 stats->tx_bytes += bytes;
375 rx_ring = &tx_ring[1];
376
377 do {
378 start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
379 packets = rx_ring->stats.packets;
380 bytes = rx_ring->stats.bytes;
381 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
382
383 stats->rx_packets += packets;
384 stats->rx_bytes += bytes;
385 }
386 rcu_read_unlock();
387
388	/* following stats are updated by the i40e watchdog subtask */
389 stats->multicast = vsi_stats->multicast;
390 stats->tx_errors = vsi_stats->tx_errors;
391 stats->tx_dropped = vsi_stats->tx_dropped;
392 stats->rx_errors = vsi_stats->rx_errors;
393 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
394 stats->rx_length_errors = vsi_stats->rx_length_errors;
395
396	return stats;
397}
398
399/**
400 * i40e_vsi_reset_stats - Resets all stats of the given vsi
401 * @vsi: the VSI to have its stats reset
402 **/
403void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
404{
405 struct rtnl_link_stats64 *ns;
406 int i;
407
408 if (!vsi)
409 return;
410
411 ns = i40e_get_vsi_stats_struct(vsi);
412 memset(ns, 0, sizeof(*ns));
413 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
414 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
415 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
416 if (vsi->rx_rings)
417 for (i = 0; i < vsi->num_queue_pairs; i++) {
418			memset(&vsi->rx_rings[i]->stats, 0,
419 sizeof(vsi->rx_rings[i]->stats));
420			memset(&vsi->rx_rings[i]->rx_stats, 0,
421 sizeof(vsi->rx_rings[i]->rx_stats));
422 memset(&vsi->tx_rings[i]->stats, 0 ,
423 sizeof(vsi->tx_rings[i]->stats));
424 memset(&vsi->tx_rings[i]->tx_stats, 0,
425 sizeof(vsi->tx_rings[i]->tx_stats));
426 }
427 vsi->stat_offsets_loaded = false;
428}
429
430/**
431 * i40e_pf_reset_stats - Reset all of the stats for the given pf
432 * @pf: the PF to be reset
433 **/
434void i40e_pf_reset_stats(struct i40e_pf *pf)
435{
436 memset(&pf->stats, 0, sizeof(pf->stats));
437 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
438 pf->stat_offsets_loaded = false;
439}
440
441/**
442 * i40e_stat_update48 - read and update a 48 bit stat from the chip
443 * @hw: ptr to the hardware info
444 * @hireg: the high 32 bit reg to read
445 * @loreg: the low 32 bit reg to read
446 * @offset_loaded: has the initial offset been loaded yet
447 * @offset: ptr to current offset value
448 * @stat: ptr to the stat
449 *
450 * Since the device stats are not reset at PFReset, they likely will not
451 * be zeroed when the driver starts. We'll save the first values read
452 * and use them as offsets to be subtracted from the raw values in order
453 * to report stats that count from zero. In the process, we also manage
454 * the potential roll-over.
455 **/
456static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
457 bool offset_loaded, u64 *offset, u64 *stat)
458{
459 u64 new_data;
460
461 if (hw->device_id == I40E_QEMU_DEVICE_ID) {
462 new_data = rd32(hw, loreg);
463 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
464 } else {
465 new_data = rd64(hw, loreg);
466 }
467 if (!offset_loaded)
468 *offset = new_data;
469 if (likely(new_data >= *offset))
470 *stat = new_data - *offset;
471 else
472 *stat = (new_data + ((u64)1 << 48)) - *offset;
473 *stat &= 0xFFFFFFFFFFFFULL;
474}
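
/* Worked example of the rollover handling above (numbers invented for
 * illustration): if the first read latched *offset = 0xFFFF00000000 and a
 * later read returns new_data = 0x000000010000, then new_data < *offset,
 * so the 48-bit counter wrapped and the reported value becomes
 * (new_data + (1ULL << 48)) - *offset = 0x000100010000, which still fits
 * inside the 48-bit mask applied at the end.
 */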
475
476/**
477 * i40e_stat_update32 - read and update a 32 bit stat from the chip
478 * @hw: ptr to the hardware info
479 * @reg: the hw reg to read
480 * @offset_loaded: has the initial offset been loaded yet
481 * @offset: ptr to current offset value
482 * @stat: ptr to the stat
483 **/
484static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
485 bool offset_loaded, u64 *offset, u64 *stat)
486{
487 u32 new_data;
488
489 new_data = rd32(hw, reg);
490 if (!offset_loaded)
491 *offset = new_data;
492 if (likely(new_data >= *offset))
493 *stat = (u32)(new_data - *offset);
494 else
495 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
496}
497
498/**
499 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
500 * @vsi: the VSI to be updated
501 **/
502void i40e_update_eth_stats(struct i40e_vsi *vsi)
503{
504 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
505 struct i40e_pf *pf = vsi->back;
506 struct i40e_hw *hw = &pf->hw;
507 struct i40e_eth_stats *oes;
508 struct i40e_eth_stats *es; /* device's eth stats */
509
510 es = &vsi->eth_stats;
511 oes = &vsi->eth_stats_offsets;
512
513 /* Gather up the stats that the hw collects */
514 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
515 vsi->stat_offsets_loaded,
516 &oes->tx_errors, &es->tx_errors);
517 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
518 vsi->stat_offsets_loaded,
519 &oes->rx_discards, &es->rx_discards);
520
521 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
522 I40E_GLV_GORCL(stat_idx),
523 vsi->stat_offsets_loaded,
524 &oes->rx_bytes, &es->rx_bytes);
525 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
526 I40E_GLV_UPRCL(stat_idx),
527 vsi->stat_offsets_loaded,
528 &oes->rx_unicast, &es->rx_unicast);
529 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
530 I40E_GLV_MPRCL(stat_idx),
531 vsi->stat_offsets_loaded,
532 &oes->rx_multicast, &es->rx_multicast);
533 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
534 I40E_GLV_BPRCL(stat_idx),
535 vsi->stat_offsets_loaded,
536 &oes->rx_broadcast, &es->rx_broadcast);
537
538 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
539 I40E_GLV_GOTCL(stat_idx),
540 vsi->stat_offsets_loaded,
541 &oes->tx_bytes, &es->tx_bytes);
542 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
543 I40E_GLV_UPTCL(stat_idx),
544 vsi->stat_offsets_loaded,
545 &oes->tx_unicast, &es->tx_unicast);
546 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
547 I40E_GLV_MPTCL(stat_idx),
548 vsi->stat_offsets_loaded,
549 &oes->tx_multicast, &es->tx_multicast);
550 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
551 I40E_GLV_BPTCL(stat_idx),
552 vsi->stat_offsets_loaded,
553 &oes->tx_broadcast, &es->tx_broadcast);
554 vsi->stat_offsets_loaded = true;
555}
556
557/**
558 * i40e_update_veb_stats - Update Switch component statistics
559 * @veb: the VEB being updated
560 **/
561static void i40e_update_veb_stats(struct i40e_veb *veb)
562{
563 struct i40e_pf *pf = veb->pf;
564 struct i40e_hw *hw = &pf->hw;
565 struct i40e_eth_stats *oes;
566 struct i40e_eth_stats *es; /* device's eth stats */
567 int idx = 0;
568
569 idx = veb->stats_idx;
570 es = &veb->stats;
571 oes = &veb->stats_offsets;
572
573 /* Gather up the stats that the hw collects */
574 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
575 veb->stat_offsets_loaded,
576 &oes->tx_discards, &es->tx_discards);
577 if (hw->revision_id > 0)
578 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
579 veb->stat_offsets_loaded,
580 &oes->rx_unknown_protocol,
581 &es->rx_unknown_protocol);
582 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
583 veb->stat_offsets_loaded,
584 &oes->rx_bytes, &es->rx_bytes);
585 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
586 veb->stat_offsets_loaded,
587 &oes->rx_unicast, &es->rx_unicast);
588 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
589 veb->stat_offsets_loaded,
590 &oes->rx_multicast, &es->rx_multicast);
591 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
592 veb->stat_offsets_loaded,
593 &oes->rx_broadcast, &es->rx_broadcast);
594
595 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
596 veb->stat_offsets_loaded,
597 &oes->tx_bytes, &es->tx_bytes);
598 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
599 veb->stat_offsets_loaded,
600 &oes->tx_unicast, &es->tx_unicast);
601 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
602 veb->stat_offsets_loaded,
603 &oes->tx_multicast, &es->tx_multicast);
604 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
605 veb->stat_offsets_loaded,
606 &oes->tx_broadcast, &es->tx_broadcast);
607 veb->stat_offsets_loaded = true;
608}
609
610/**
611 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
612 * @pf: the corresponding PF
613 *
614 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
615 **/
616static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
617{
618 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
619 struct i40e_hw_port_stats *nsd = &pf->stats;
620 struct i40e_hw *hw = &pf->hw;
621 u64 xoff = 0;
622 u16 i, v;
623
624 if ((hw->fc.current_mode != I40E_FC_FULL) &&
625 (hw->fc.current_mode != I40E_FC_RX_PAUSE))
626 return;
627
628 xoff = nsd->link_xoff_rx;
629 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
630 pf->stat_offsets_loaded,
631 &osd->link_xoff_rx, &nsd->link_xoff_rx);
632
633 /* No new LFC xoff rx */
634 if (!(nsd->link_xoff_rx - xoff))
635 return;
636
637 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
638 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
639 struct i40e_vsi *vsi = pf->vsi[v];
640
641 if (!vsi)
642 continue;
643
644 for (i = 0; i < vsi->num_queue_pairs; i++) {
645			struct i40e_ring *ring = vsi->tx_rings[i];
646 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
647 }
648 }
649}
650
651/**
652 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
653 * @pf: the corresponding PF
654 *
655 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
656 **/
657static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
658{
659 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
660 struct i40e_hw_port_stats *nsd = &pf->stats;
661 bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
662 struct i40e_dcbx_config *dcb_cfg;
663 struct i40e_hw *hw = &pf->hw;
664 u16 i, v;
665 u8 tc;
666
667 dcb_cfg = &hw->local_dcbx_config;
668
669 /* See if DCB enabled with PFC TC */
670 if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
671 !(dcb_cfg->pfc.pfcenable)) {
672 i40e_update_link_xoff_rx(pf);
673 return;
674 }
675
676 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
677 u64 prio_xoff = nsd->priority_xoff_rx[i];
678 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
679 pf->stat_offsets_loaded,
680 &osd->priority_xoff_rx[i],
681 &nsd->priority_xoff_rx[i]);
682
683 /* No new PFC xoff rx */
684 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
685 continue;
686 /* Get the TC for given priority */
687 tc = dcb_cfg->etscfg.prioritytable[i];
688 xoff[tc] = true;
689 }
690
691 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
692 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
693 struct i40e_vsi *vsi = pf->vsi[v];
694
695 if (!vsi)
696 continue;
697
698 for (i = 0; i < vsi->num_queue_pairs; i++) {
699			struct i40e_ring *ring = vsi->tx_rings[i];
700
701 tc = ring->dcb_tc;
702 if (xoff[tc])
703 clear_bit(__I40E_HANG_CHECK_ARMED,
704 &ring->state);
705 }
706 }
707}
708
709/**
710 * i40e_update_stats - Update the board statistics counters.
711 * @vsi: the VSI to be updated
712 *
713 * There are a few instances where we store the same stat in a
714 * couple of different structs. This is partly because we have
715 * the netdev stats that need to be filled out, which is slightly
716 * different from the "eth_stats" defined by the chip and used in
717 * VF communications. We sort it all out here in a central place.
718 **/
719void i40e_update_stats(struct i40e_vsi *vsi)
720{
721 struct i40e_pf *pf = vsi->back;
722 struct i40e_hw *hw = &pf->hw;
723 struct rtnl_link_stats64 *ons;
724 struct rtnl_link_stats64 *ns; /* netdev stats */
725 struct i40e_eth_stats *oes;
726 struct i40e_eth_stats *es; /* device's eth stats */
727 u32 tx_restart, tx_busy;
728 u32 rx_page, rx_buf;
729 u64 rx_p, rx_b;
730 u64 tx_p, tx_b;
731 int i;
732 u16 q;
733
734 if (test_bit(__I40E_DOWN, &vsi->state) ||
735 test_bit(__I40E_CONFIG_BUSY, &pf->state))
736 return;
737
738 ns = i40e_get_vsi_stats_struct(vsi);
739 ons = &vsi->net_stats_offsets;
740 es = &vsi->eth_stats;
741 oes = &vsi->eth_stats_offsets;
742
743 /* Gather up the netdev and vsi stats that the driver collects
744 * on the fly during packet processing
745 */
746 rx_b = rx_p = 0;
747 tx_b = tx_p = 0;
748 tx_restart = tx_busy = 0;
749 rx_page = 0;
750 rx_buf = 0;
751	rcu_read_lock();
752 for (q = 0; q < vsi->num_queue_pairs; q++) {
753 struct i40e_ring *p;
754 u64 bytes, packets;
755 unsigned int start;
756
757 /* locate Tx ring */
758 p = ACCESS_ONCE(vsi->tx_rings[q]);
759
760 do {
761 start = u64_stats_fetch_begin_bh(&p->syncp);
762 packets = p->stats.packets;
763 bytes = p->stats.bytes;
764 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
765 tx_b += bytes;
766 tx_p += packets;
767 tx_restart += p->tx_stats.restart_queue;
768 tx_busy += p->tx_stats.tx_busy;
769
770 /* Rx queue is part of the same block as Tx queue */
771 p = &p[1];
772 do {
773 start = u64_stats_fetch_begin_bh(&p->syncp);
774 packets = p->stats.packets;
775 bytes = p->stats.bytes;
776 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
777 rx_b += bytes;
778 rx_p += packets;
779 rx_buf += p->rx_stats.alloc_rx_buff_failed;
780 rx_page += p->rx_stats.alloc_rx_page_failed;
781	}
782	rcu_read_unlock();
783 vsi->tx_restart = tx_restart;
784 vsi->tx_busy = tx_busy;
785 vsi->rx_page_failed = rx_page;
786 vsi->rx_buf_failed = rx_buf;
787
788 ns->rx_packets = rx_p;
789 ns->rx_bytes = rx_b;
790 ns->tx_packets = tx_p;
791 ns->tx_bytes = tx_b;
792
793 i40e_update_eth_stats(vsi);
794 /* update netdev stats from eth stats */
795 ons->rx_errors = oes->rx_errors;
796 ns->rx_errors = es->rx_errors;
797 ons->tx_errors = oes->tx_errors;
798 ns->tx_errors = es->tx_errors;
799 ons->multicast = oes->rx_multicast;
800 ns->multicast = es->rx_multicast;
801 ons->tx_dropped = oes->tx_discards;
802 ns->tx_dropped = es->tx_discards;
803
804 /* Get the port data only if this is the main PF VSI */
805 if (vsi == pf->vsi[pf->lan_vsi]) {
806 struct i40e_hw_port_stats *nsd = &pf->stats;
807 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
808
809 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
810 I40E_GLPRT_GORCL(hw->port),
811 pf->stat_offsets_loaded,
812 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
813 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
814 I40E_GLPRT_GOTCL(hw->port),
815 pf->stat_offsets_loaded,
816 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
817 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
818 pf->stat_offsets_loaded,
819 &osd->eth.rx_discards,
820 &nsd->eth.rx_discards);
821 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
822 pf->stat_offsets_loaded,
823 &osd->eth.tx_discards,
824 &nsd->eth.tx_discards);
825 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
826 I40E_GLPRT_MPRCL(hw->port),
827 pf->stat_offsets_loaded,
828 &osd->eth.rx_multicast,
829 &nsd->eth.rx_multicast);
830
831 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
832 pf->stat_offsets_loaded,
833 &osd->tx_dropped_link_down,
834 &nsd->tx_dropped_link_down);
835
836 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
837 pf->stat_offsets_loaded,
838 &osd->crc_errors, &nsd->crc_errors);
839 ns->rx_crc_errors = nsd->crc_errors;
840
841 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
842 pf->stat_offsets_loaded,
843 &osd->illegal_bytes, &nsd->illegal_bytes);
844 ns->rx_errors = nsd->crc_errors
845 + nsd->illegal_bytes;
846
847 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
848 pf->stat_offsets_loaded,
849 &osd->mac_local_faults,
850 &nsd->mac_local_faults);
851 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
852 pf->stat_offsets_loaded,
853 &osd->mac_remote_faults,
854 &nsd->mac_remote_faults);
855
856 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
857 pf->stat_offsets_loaded,
858 &osd->rx_length_errors,
859 &nsd->rx_length_errors);
860 ns->rx_length_errors = nsd->rx_length_errors;
861
862 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
863 pf->stat_offsets_loaded,
864 &osd->link_xon_rx, &nsd->link_xon_rx);
865 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
866 pf->stat_offsets_loaded,
867 &osd->link_xon_tx, &nsd->link_xon_tx);
868 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
869 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
870 pf->stat_offsets_loaded,
871 &osd->link_xoff_tx, &nsd->link_xoff_tx);
872
873 for (i = 0; i < 8; i++) {
874 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
875 pf->stat_offsets_loaded,
876 &osd->priority_xon_rx[i],
877 &nsd->priority_xon_rx[i]);
878 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
879 pf->stat_offsets_loaded,
880 &osd->priority_xon_tx[i],
881 &nsd->priority_xon_tx[i]);
882 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
883 pf->stat_offsets_loaded,
884 &osd->priority_xoff_tx[i],
885 &nsd->priority_xoff_tx[i]);
886 i40e_stat_update32(hw,
887 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
888 pf->stat_offsets_loaded,
889 &osd->priority_xon_2_xoff[i],
890 &nsd->priority_xon_2_xoff[i]);
891 }
892
893 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
894 I40E_GLPRT_PRC64L(hw->port),
895 pf->stat_offsets_loaded,
896 &osd->rx_size_64, &nsd->rx_size_64);
897 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
898 I40E_GLPRT_PRC127L(hw->port),
899 pf->stat_offsets_loaded,
900 &osd->rx_size_127, &nsd->rx_size_127);
901 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
902 I40E_GLPRT_PRC255L(hw->port),
903 pf->stat_offsets_loaded,
904 &osd->rx_size_255, &nsd->rx_size_255);
905 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
906 I40E_GLPRT_PRC511L(hw->port),
907 pf->stat_offsets_loaded,
908 &osd->rx_size_511, &nsd->rx_size_511);
909 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
910 I40E_GLPRT_PRC1023L(hw->port),
911 pf->stat_offsets_loaded,
912 &osd->rx_size_1023, &nsd->rx_size_1023);
913 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
914 I40E_GLPRT_PRC1522L(hw->port),
915 pf->stat_offsets_loaded,
916 &osd->rx_size_1522, &nsd->rx_size_1522);
917 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
918 I40E_GLPRT_PRC9522L(hw->port),
919 pf->stat_offsets_loaded,
920 &osd->rx_size_big, &nsd->rx_size_big);
921
922 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
923 I40E_GLPRT_PTC64L(hw->port),
924 pf->stat_offsets_loaded,
925 &osd->tx_size_64, &nsd->tx_size_64);
926 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
927 I40E_GLPRT_PTC127L(hw->port),
928 pf->stat_offsets_loaded,
929 &osd->tx_size_127, &nsd->tx_size_127);
930 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
931 I40E_GLPRT_PTC255L(hw->port),
932 pf->stat_offsets_loaded,
933 &osd->tx_size_255, &nsd->tx_size_255);
934 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
935 I40E_GLPRT_PTC511L(hw->port),
936 pf->stat_offsets_loaded,
937 &osd->tx_size_511, &nsd->tx_size_511);
938 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
939 I40E_GLPRT_PTC1023L(hw->port),
940 pf->stat_offsets_loaded,
941 &osd->tx_size_1023, &nsd->tx_size_1023);
942 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
943 I40E_GLPRT_PTC1522L(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->tx_size_1522, &nsd->tx_size_1522);
946 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
947 I40E_GLPRT_PTC9522L(hw->port),
948 pf->stat_offsets_loaded,
949 &osd->tx_size_big, &nsd->tx_size_big);
950
951 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
952 pf->stat_offsets_loaded,
953 &osd->rx_undersize, &nsd->rx_undersize);
954 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
955 pf->stat_offsets_loaded,
956 &osd->rx_fragments, &nsd->rx_fragments);
957 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->rx_oversize, &nsd->rx_oversize);
960 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
961 pf->stat_offsets_loaded,
962 &osd->rx_jabber, &nsd->rx_jabber);
963 }
964
965 pf->stat_offsets_loaded = true;
966}
967
968/**
969 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
970 * @vsi: the VSI to be searched
971 * @macaddr: the MAC address
972 * @vlan: the vlan
973 * @is_vf: make sure it's a vf filter, else doesn't matter
974 * @is_netdev: make sure it's a netdev filter, else doesn't matter
975 *
976 * Returns ptr to the filter object or NULL
977 **/
978static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
979 u8 *macaddr, s16 vlan,
980 bool is_vf, bool is_netdev)
981{
982 struct i40e_mac_filter *f;
983
984 if (!vsi || !macaddr)
985 return NULL;
986
987 list_for_each_entry(f, &vsi->mac_filter_list, list) {
988 if ((ether_addr_equal(macaddr, f->macaddr)) &&
989 (vlan == f->vlan) &&
990 (!is_vf || f->is_vf) &&
991 (!is_netdev || f->is_netdev))
992 return f;
993 }
994 return NULL;
995}
996
997/**
998 * i40e_find_mac - Find a mac addr in the macvlan filters list
999 * @vsi: the VSI to be searched
1000 * @macaddr: the MAC address we are searching for
1001 * @is_vf: make sure it's a vf filter, else doesn't matter
1002 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1003 *
1004 * Returns the first filter with the provided MAC address or NULL if
1005 * MAC address was not found
1006 **/
1007struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1008 bool is_vf, bool is_netdev)
1009{
1010 struct i40e_mac_filter *f;
1011
1012 if (!vsi || !macaddr)
1013 return NULL;
1014
1015 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1016 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1017 (!is_vf || f->is_vf) &&
1018 (!is_netdev || f->is_netdev))
1019 return f;
1020 }
1021 return NULL;
1022}
1023
1024/**
1025 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1026 * @vsi: the VSI to be searched
1027 *
1028 * Returns true if VSI is in vlan mode or false otherwise
1029 **/
1030bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1031{
1032 struct i40e_mac_filter *f;
1033
1034	/* Only when every filter has vlan == -1 is the VSI not in vlan mode,
1035	 * so we have to go through the whole list to be sure
1036 */
1037 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1038 if (f->vlan >= 0)
1039 return true;
1040 }
1041
1042 return false;
1043}
1044
1045/**
1046 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1047 * @vsi: the VSI to be searched
1048 * @macaddr: the mac address to be filtered
1049 * @is_vf: true if it is a vf
1050 * @is_netdev: true if it is a netdev
1051 *
1052 * Goes through all the macvlan filters and adds a
1053 * macvlan filter for each unique vlan that already exists
1054 *
1055 * Returns first filter found on success, else NULL
1056 **/
1057struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1058 bool is_vf, bool is_netdev)
1059{
1060 struct i40e_mac_filter *f;
1061
1062 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1063 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1064 is_vf, is_netdev)) {
1065 if (!i40e_add_filter(vsi, macaddr, f->vlan,
1066 is_vf, is_netdev))
1067 return NULL;
1068 }
1069 }
1070
1071 return list_first_entry_or_null(&vsi->mac_filter_list,
1072 struct i40e_mac_filter, list);
1073}
1074
1075/**
1076 * i40e_add_filter - Add a mac/vlan filter to the VSI
1077 * @vsi: the VSI to be searched
1078 * @macaddr: the MAC address
1079 * @vlan: the vlan
1080 * @is_vf: make sure it's a vf filter, else doesn't matter
1081 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1082 *
1083 * Returns ptr to the filter object or NULL when no memory available.
1084 **/
1085struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1086 u8 *macaddr, s16 vlan,
1087 bool is_vf, bool is_netdev)
1088{
1089 struct i40e_mac_filter *f;
1090
1091 if (!vsi || !macaddr)
1092 return NULL;
1093
1094 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1095 if (!f) {
1096 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1097 if (!f)
1098 goto add_filter_out;
1099
1100 memcpy(f->macaddr, macaddr, ETH_ALEN);
1101 f->vlan = vlan;
1102 f->changed = true;
1103
1104 INIT_LIST_HEAD(&f->list);
1105 list_add(&f->list, &vsi->mac_filter_list);
1106 }
1107
1108 /* increment counter and add a new flag if needed */
1109 if (is_vf) {
1110 if (!f->is_vf) {
1111 f->is_vf = true;
1112 f->counter++;
1113 }
1114 } else if (is_netdev) {
1115 if (!f->is_netdev) {
1116 f->is_netdev = true;
1117 f->counter++;
1118 }
1119 } else {
1120 f->counter++;
1121 }
1122
1123 /* changed tells sync_filters_subtask to
1124 * push the filter down to the firmware
1125 */
1126 if (f->changed) {
1127 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1128 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1129 }
1130
1131add_filter_out:
1132 return f;
1133}
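
/* Note on the reference counting above: the same MAC/VLAN entry may be
 * claimed by the netdev, by a VF, or by another caller. For example,
 * adding the filter once with is_netdev and once with is_vf leaves
 * f->counter == 2; the entry is only flagged for removal from the
 * firmware after i40e_del_filter() drops the counter back to zero.
 */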
1134
1135/**
1136 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1137 * @vsi: the VSI to be searched
1138 * @macaddr: the MAC address
1139 * @vlan: the vlan
1140 * @is_vf: make sure it's a vf filter, else doesn't matter
1141 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1142 **/
1143void i40e_del_filter(struct i40e_vsi *vsi,
1144 u8 *macaddr, s16 vlan,
1145 bool is_vf, bool is_netdev)
1146{
1147 struct i40e_mac_filter *f;
1148
1149 if (!vsi || !macaddr)
1150 return;
1151
1152 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1153 if (!f || f->counter == 0)
1154 return;
1155
1156 if (is_vf) {
1157 if (f->is_vf) {
1158 f->is_vf = false;
1159 f->counter--;
1160 }
1161 } else if (is_netdev) {
1162 if (f->is_netdev) {
1163 f->is_netdev = false;
1164 f->counter--;
1165 }
1166 } else {
1167 /* make sure we don't remove a filter in use by vf or netdev */
1168 int min_f = 0;
1169 min_f += (f->is_vf ? 1 : 0);
1170 min_f += (f->is_netdev ? 1 : 0);
1171
1172 if (f->counter > min_f)
1173 f->counter--;
1174 }
1175
1176 /* counter == 0 tells sync_filters_subtask to
1177 * remove the filter from the firmware's list
1178 */
1179 if (f->counter == 0) {
1180 f->changed = true;
1181 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1182 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1183 }
1184}
1185
1186/**
1187 * i40e_set_mac - NDO callback to set mac address
1188 * @netdev: network interface device structure
1189 * @p: pointer to an address structure
1190 *
1191 * Returns 0 on success, negative on failure
1192 **/
1193static int i40e_set_mac(struct net_device *netdev, void *p)
1194{
1195 struct i40e_netdev_priv *np = netdev_priv(netdev);
1196 struct i40e_vsi *vsi = np->vsi;
1197 struct sockaddr *addr = p;
1198 struct i40e_mac_filter *f;
1199
1200 if (!is_valid_ether_addr(addr->sa_data))
1201 return -EADDRNOTAVAIL;
1202
1203 netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
1204
1205 if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
1206 return 0;
1207
1208 if (vsi->type == I40E_VSI_MAIN) {
1209 i40e_status ret;
1210 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1211 I40E_AQC_WRITE_TYPE_LAA_ONLY,
1212 addr->sa_data, NULL);
1213 if (ret) {
1214 netdev_info(netdev,
1215 "Addr change for Main VSI failed: %d\n",
1216 ret);
1217 return -EADDRNOTAVAIL;
1218 }
1219
1220 memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
1221 }
1222
1223 /* In order to be sure to not drop any packets, add the new address
1224 * then delete the old one.
1225 */
1226 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
1227 if (!f)
1228 return -ENOMEM;
1229
1230 i40e_sync_vsi_filters(vsi);
1231 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
1232 i40e_sync_vsi_filters(vsi);
1233
1234 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1235
1236 return 0;
1237}
1238
1239/**
1240 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1241 * @vsi: the VSI being setup
1242 * @ctxt: VSI context structure
1243 * @enabled_tc: Enabled TCs bitmap
1244 * @is_add: True if called before Add VSI
1245 *
1246 * Setup VSI queue mapping for enabled traffic classes.
1247 **/
1248static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1249 struct i40e_vsi_context *ctxt,
1250 u8 enabled_tc,
1251 bool is_add)
1252{
1253 struct i40e_pf *pf = vsi->back;
1254 u16 sections = 0;
1255 u8 netdev_tc = 0;
1256 u16 numtc = 0;
1257 u16 qcount;
1258 u8 offset;
1259 u16 qmap;
1260 int i;
1261
1262 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1263 offset = 0;
1264
1265 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1266 /* Find numtc from enabled TC bitmap */
1267 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1268 if (enabled_tc & (1 << i)) /* TC is enabled */
1269 numtc++;
1270 }
1271 if (!numtc) {
1272 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1273 numtc = 1;
1274 }
1275 } else {
1276 /* At least TC0 is enabled in case of non-DCB case */
1277 numtc = 1;
1278 }
1279
1280 vsi->tc_config.numtc = numtc;
1281 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1282
1283 /* Setup queue offset/count for all TCs for given VSI */
1284 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1285 /* See if the given TC is enabled for the given VSI */
1286 if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1287 int pow, num_qps;
1288
1289 vsi->tc_config.tc_info[i].qoffset = offset;
1290 switch (vsi->type) {
1291 case I40E_VSI_MAIN:
1292 if (i == 0)
1293 qcount = pf->rss_size;
1294 else
1295 qcount = pf->num_tc_qps;
1296 vsi->tc_config.tc_info[i].qcount = qcount;
1297 break;
1298 case I40E_VSI_FDIR:
1299 case I40E_VSI_SRIOV:
1300 case I40E_VSI_VMDQ2:
1301 default:
1302 qcount = vsi->alloc_queue_pairs;
1303 vsi->tc_config.tc_info[i].qcount = qcount;
1304 WARN_ON(i != 0);
1305 break;
1306 }
1307
1308 /* find the power-of-2 of the number of queue pairs */
1309 num_qps = vsi->tc_config.tc_info[i].qcount;
1310 pow = 0;
1311 while (num_qps &&
1312 ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
1313 pow++;
1314 num_qps >>= 1;
1315 }
1316
1317 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1318 qmap =
1319 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1320 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1321
1322 offset += vsi->tc_config.tc_info[i].qcount;
1323 } else {
1324 /* TC is not enabled so set the offset to
1325 * default queue and allocate one queue
1326 * for the given TC.
1327 */
1328 vsi->tc_config.tc_info[i].qoffset = 0;
1329 vsi->tc_config.tc_info[i].qcount = 1;
1330 vsi->tc_config.tc_info[i].netdev_tc = 0;
1331
1332 qmap = 0;
1333 }
1334 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1335 }
1336
1337 /* Set actual Tx/Rx queue pairs */
1338 vsi->num_queue_pairs = offset;
1339
1340 /* Scheduler section valid can only be set for ADD VSI */
1341 if (is_add) {
1342 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1343
1344 ctxt->info.up_enable_bits = enabled_tc;
1345 }
1346 if (vsi->type == I40E_VSI_SRIOV) {
1347 ctxt->info.mapping_flags |=
1348 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1349 for (i = 0; i < vsi->num_queue_pairs; i++)
1350 ctxt->info.queue_mapping[i] =
1351 cpu_to_le16(vsi->base_queue + i);
1352 } else {
1353 ctxt->info.mapping_flags |=
1354 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1355 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1356 }
1357 ctxt->info.valid_sections |= cpu_to_le16(sections);
1358}
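
/* Example of the qmap encoding built above (illustrative numbers only):
 * a TC with qoffset = 8 and qcount = 4 yields pow = 2, so
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT),
 * i.e. "start at queue 8 and use 2^2 = 4 queue pairs" for that TC.
 */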
1359
1360/**
1361 * i40e_set_rx_mode - NDO callback to set the netdev filters
1362 * @netdev: network interface device structure
1363 **/
1364static void i40e_set_rx_mode(struct net_device *netdev)
1365{
1366 struct i40e_netdev_priv *np = netdev_priv(netdev);
1367 struct i40e_mac_filter *f, *ftmp;
1368 struct i40e_vsi *vsi = np->vsi;
1369 struct netdev_hw_addr *uca;
1370 struct netdev_hw_addr *mca;
1371 struct netdev_hw_addr *ha;
1372
1373 /* add addr if not already in the filter list */
1374 netdev_for_each_uc_addr(uca, netdev) {
1375 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1376 if (i40e_is_vsi_in_vlan(vsi))
1377 i40e_put_mac_in_vlan(vsi, uca->addr,
1378 false, true);
1379 else
1380 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1381 false, true);
1382 }
1383 }
1384
1385 netdev_for_each_mc_addr(mca, netdev) {
1386 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1387 if (i40e_is_vsi_in_vlan(vsi))
1388 i40e_put_mac_in_vlan(vsi, mca->addr,
1389 false, true);
1390 else
1391 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1392 false, true);
1393 }
1394 }
1395
1396 /* remove filter if not in netdev list */
1397 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1398 bool found = false;
1399
1400 if (!f->is_netdev)
1401 continue;
1402
1403 if (is_multicast_ether_addr(f->macaddr)) {
1404 netdev_for_each_mc_addr(mca, netdev) {
1405 if (ether_addr_equal(mca->addr, f->macaddr)) {
1406 found = true;
1407 break;
1408 }
1409 }
1410 } else {
1411 netdev_for_each_uc_addr(uca, netdev) {
1412 if (ether_addr_equal(uca->addr, f->macaddr)) {
1413 found = true;
1414 break;
1415 }
1416 }
1417
1418 for_each_dev_addr(netdev, ha) {
1419 if (ether_addr_equal(ha->addr, f->macaddr)) {
1420 found = true;
1421 break;
1422 }
1423 }
1424 }
1425 if (!found)
1426 i40e_del_filter(
1427 vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1428 }
1429
1430 /* check for other flag changes */
1431 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1432 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1433 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1434 }
1435}
1436
1437/**
1438 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1439 * @vsi: ptr to the VSI
1440 *
1441 * Push any outstanding VSI filter changes through the AdminQ.
1442 *
1443 * Returns 0 or error value
1444 **/
1445int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1446{
1447 struct i40e_mac_filter *f, *ftmp;
1448 bool promisc_forced_on = false;
1449 bool add_happened = false;
1450 int filter_list_len = 0;
1451 u32 changed_flags = 0;
1452	i40e_status aq_ret = 0;
1453 struct i40e_pf *pf;
1454 int num_add = 0;
1455 int num_del = 0;
1456 u16 cmd_flags;
1457
1458 /* empty array typed pointers, kcalloc later */
1459 struct i40e_aqc_add_macvlan_element_data *add_list;
1460 struct i40e_aqc_remove_macvlan_element_data *del_list;
1461
1462 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1463 usleep_range(1000, 2000);
1464 pf = vsi->back;
1465
1466 if (vsi->netdev) {
1467 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1468 vsi->current_netdev_flags = vsi->netdev->flags;
1469 }
1470
1471 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1472 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1473
1474 filter_list_len = pf->hw.aq.asq_buf_size /
1475 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1476 del_list = kcalloc(filter_list_len,
1477 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1478 GFP_KERNEL);
1479 if (!del_list)
1480 return -ENOMEM;
1481
1482 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1483 if (!f->changed)
1484 continue;
1485
1486 if (f->counter != 0)
1487 continue;
1488 f->changed = false;
1489 cmd_flags = 0;
1490
1491 /* add to delete list */
1492 memcpy(del_list[num_del].mac_addr,
1493 f->macaddr, ETH_ALEN);
1494 del_list[num_del].vlan_tag =
1495 cpu_to_le16((u16)(f->vlan ==
1496 I40E_VLAN_ANY ? 0 : f->vlan));
1497
1498 /* vlan0 as wild card to allow packets from all vlans */
1499 if (f->vlan == I40E_VLAN_ANY ||
1500 (vsi->netdev && !(vsi->netdev->features &
1501 NETIF_F_HW_VLAN_CTAG_FILTER)))
1502 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1503 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1504 del_list[num_del].flags = cmd_flags;
1505 num_del++;
1506
1507 /* unlink from filter list */
1508 list_del(&f->list);
1509 kfree(f);
1510
1511 /* flush a full buffer */
1512 if (num_del == filter_list_len) {
1513				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1514 vsi->seid, del_list, num_del,
1515 NULL);
1516 num_del = 0;
1517 memset(del_list, 0, sizeof(*del_list));
1518
1519				if (aq_ret)
1520 dev_info(&pf->pdev->dev,
1521 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1522						 aq_ret,
1523 pf->hw.aq.asq_last_status);
1524 }
1525 }
1526 if (num_del) {
1527			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1528 del_list, num_del, NULL);
1529 num_del = 0;
1530
1531			if (aq_ret)
1532 dev_info(&pf->pdev->dev,
1533 "ignoring delete macvlan error, err %d, aq_err %d\n",
1534					 aq_ret, pf->hw.aq.asq_last_status);
1535 }
1536
1537 kfree(del_list);
1538 del_list = NULL;
1539
1540 /* do all the adds now */
1541 filter_list_len = pf->hw.aq.asq_buf_size /
1542			sizeof(struct i40e_aqc_add_macvlan_element_data);
1543 add_list = kcalloc(filter_list_len,
1544 sizeof(struct i40e_aqc_add_macvlan_element_data),
1545 GFP_KERNEL);
1546 if (!add_list)
1547 return -ENOMEM;
1548
1549 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1550 if (!f->changed)
1551 continue;
1552
1553 if (f->counter == 0)
1554 continue;
1555 f->changed = false;
1556 add_happened = true;
1557 cmd_flags = 0;
1558
1559 /* add to add array */
1560 memcpy(add_list[num_add].mac_addr,
1561 f->macaddr, ETH_ALEN);
1562 add_list[num_add].vlan_tag =
1563 cpu_to_le16(
1564 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1565 add_list[num_add].queue_number = 0;
1566
1567 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1568
1569 /* vlan0 as wild card to allow packets from all vlans */
1570 if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
1571 !(vsi->netdev->features &
1572 NETIF_F_HW_VLAN_CTAG_FILTER)))
1573 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1574 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1575 num_add++;
1576
1577 /* flush a full buffer */
1578 if (num_add == filter_list_len) {
1579 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1580 add_list, num_add,
1581 NULL);
1582 num_add = 0;
1583
1584				if (aq_ret)
1585 break;
1586 memset(add_list, 0, sizeof(*add_list));
1587 }
1588 }
1589 if (num_add) {
1590 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1591 add_list, num_add, NULL);
1592 num_add = 0;
1593 }
1594 kfree(add_list);
1595 add_list = NULL;
1596
1597		if (add_happened && (!aq_ret)) {
1598			/* do nothing */;
1599		} else if (add_happened && (aq_ret)) {
1600 dev_info(&pf->pdev->dev,
1601 "add filter failed, err %d, aq_err %d\n",
1602				 aq_ret, pf->hw.aq.asq_last_status);
1603 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1604 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1605 &vsi->state)) {
1606 promisc_forced_on = true;
1607 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1608 &vsi->state);
1609 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1610 }
1611 }
1612 }
1613
1614 /* check for changes in promiscuous modes */
1615 if (changed_flags & IFF_ALLMULTI) {
1616 bool cur_multipromisc;
1617 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1618 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1619 vsi->seid,
1620 cur_multipromisc,
1621 NULL);
1622 if (aq_ret)
1623 dev_info(&pf->pdev->dev,
1624 "set multi promisc failed, err %d, aq_err %d\n",
1625				 aq_ret, pf->hw.aq.asq_last_status);
1626 }
1627 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1628 bool cur_promisc;
1629 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1630 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1631 &vsi->state));
1632 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1633 vsi->seid,
1634 cur_promisc, NULL);
1635 if (aq_ret)
1636 dev_info(&pf->pdev->dev,
1637 "set uni promisc failed, err %d, aq_err %d\n",
1638				 aq_ret, pf->hw.aq.asq_last_status);
1639 }
1640
1641 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1642 return 0;
1643}
1644
1645/**
1646 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1647 * @pf: board private structure
1648 **/
1649static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1650{
1651 int v;
1652
1653 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1654 return;
1655 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1656
1657 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
1658 if (pf->vsi[v] &&
1659 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1660 i40e_sync_vsi_filters(pf->vsi[v]);
1661 }
1662}
1663
1664/**
1665 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1666 * @netdev: network interface device structure
1667 * @new_mtu: new value for maximum frame size
1668 *
1669 * Returns 0 on success, negative on failure
1670 **/
1671static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1672{
1673 struct i40e_netdev_priv *np = netdev_priv(netdev);
1674 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1675 struct i40e_vsi *vsi = np->vsi;
1676
1677 /* MTU < 68 is an error and causes problems on some kernels */
1678 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1679 return -EINVAL;
1680
1681 netdev_info(netdev, "changing MTU from %d to %d\n",
1682 netdev->mtu, new_mtu);
1683 netdev->mtu = new_mtu;
1684 if (netif_running(netdev))
1685 i40e_vsi_reinit_locked(vsi);
1686
1687 return 0;
1688}
1689
1690/**
1691 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1692 * @vsi: the vsi being adjusted
1693 **/
1694void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1695{
1696 struct i40e_vsi_context ctxt;
1697 i40e_status ret;
1698
1699 if ((vsi->info.valid_sections &
1700 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1701 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1702 return; /* already enabled */
1703
1704 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1705 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1706 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1707
1708 ctxt.seid = vsi->seid;
1709 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1710 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1711 if (ret) {
1712 dev_info(&vsi->back->pdev->dev,
1713 "%s: update vsi failed, aq_err=%d\n",
1714 __func__, vsi->back->hw.aq.asq_last_status);
1715 }
1716}
1717
1718/**
1719 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1720 * @vsi: the vsi being adjusted
1721 **/
1722void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1723{
1724 struct i40e_vsi_context ctxt;
1725 i40e_status ret;
1726
1727 if ((vsi->info.valid_sections &
1728 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1729 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1730 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1731 return; /* already disabled */
1732
1733 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1734 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1735 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1736
1737 ctxt.seid = vsi->seid;
1738 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1739 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1740 if (ret) {
1741 dev_info(&vsi->back->pdev->dev,
1742 "%s: update vsi failed, aq_err=%d\n",
1743 __func__, vsi->back->hw.aq.asq_last_status);
1744 }
1745}
1746
1747/**
1748 * i40e_vlan_rx_register - Setup or shutdown vlan offload
1749 * @netdev: network interface to be adjusted
1750 * @features: netdev features to test if VLAN offload is enabled or not
1751 **/
1752static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
1753{
1754 struct i40e_netdev_priv *np = netdev_priv(netdev);
1755 struct i40e_vsi *vsi = np->vsi;
1756
1757 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1758 i40e_vlan_stripping_enable(vsi);
1759 else
1760 i40e_vlan_stripping_disable(vsi);
1761}
1762
1763/**
1764 * i40e_vsi_add_vlan - Add vsi membership for given vlan
1765 * @vsi: the vsi being configured
1766 * @vid: vlan id to be added (0 = untagged only, -1 = any)
1767 **/
1768int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1769{
1770 struct i40e_mac_filter *f, *add_f;
1771 bool is_netdev, is_vf;
1772 int ret;
1773
1774 is_vf = (vsi->type == I40E_VSI_SRIOV);
1775 is_netdev = !!(vsi->netdev);
1776
1777 if (is_netdev) {
1778 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
1779 is_vf, is_netdev);
1780 if (!add_f) {
1781 dev_info(&vsi->back->pdev->dev,
1782 "Could not add vlan filter %d for %pM\n",
1783 vid, vsi->netdev->dev_addr);
1784 return -ENOMEM;
1785 }
1786 }
1787
1788 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1789 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1790 if (!add_f) {
1791 dev_info(&vsi->back->pdev->dev,
1792 "Could not add vlan filter %d for %pM\n",
1793 vid, f->macaddr);
1794 return -ENOMEM;
1795 }
1796 }
1797
1798 ret = i40e_sync_vsi_filters(vsi);
1799 if (ret) {
1800 dev_info(&vsi->back->pdev->dev,
1801 "Could not sync filters for vid %d\n", vid);
1802 return ret;
1803 }
1804
1805 /* Now if we add a vlan tag, make sure to check if it is the first
1806 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
1807 * with 0, so we now accept untagged and specified tagged traffic
1808 * (and not any tagged and untagged)
1809 */
1810 if (vid > 0) {
1811 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
1812 I40E_VLAN_ANY,
1813 is_vf, is_netdev)) {
1814 i40e_del_filter(vsi, vsi->netdev->dev_addr,
1815 I40E_VLAN_ANY, is_vf, is_netdev);
1816 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
1817 is_vf, is_netdev);
1818 if (!add_f) {
1819 dev_info(&vsi->back->pdev->dev,
1820 "Could not add filter 0 for %pM\n",
1821 vsi->netdev->dev_addr);
1822 return -ENOMEM;
1823 }
1824 }
1825
1826 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1827 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1828 is_vf, is_netdev)) {
1829 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1830 is_vf, is_netdev);
1831 add_f = i40e_add_filter(vsi, f->macaddr,
1832 0, is_vf, is_netdev);
1833 if (!add_f) {
1834 dev_info(&vsi->back->pdev->dev,
1835 "Could not add filter 0 for %pM\n",
1836 f->macaddr);
1837 return -ENOMEM;
1838 }
1839 }
1840 }
1841 ret = i40e_sync_vsi_filters(vsi);
1842 }
1843
1844 return ret;
1845}
1846
1847/**
1848 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1849 * @vsi: the vsi being configured
1850 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
1851 *
1852 * Return: 0 on success or negative otherwise
1853 **/
1854int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1855{
1856 struct net_device *netdev = vsi->netdev;
1857 struct i40e_mac_filter *f, *add_f;
1858 bool is_vf, is_netdev;
1859 int filter_count = 0;
1860 int ret;
1861
1862 is_vf = (vsi->type == I40E_VSI_SRIOV);
1863 is_netdev = !!(netdev);
1864
1865 if (is_netdev)
1866 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1867
1868 list_for_each_entry(f, &vsi->mac_filter_list, list)
1869 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1870
1871 ret = i40e_sync_vsi_filters(vsi);
1872 if (ret) {
1873 dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
1874 return ret;
1875 }
1876
1877 /* go through all the filters for this VSI and if there is only
1878 * vid == 0 it means there are no other filters, so vid 0 must
1879 * be replaced with -1. This signifies that we should from now
1880 * on accept any traffic (with any tag present, or untagged)
1881 */
1882 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1883 if (is_netdev) {
1884 if (f->vlan &&
1885 ether_addr_equal(netdev->dev_addr, f->macaddr))
1886 filter_count++;
1887 }
1888
1889 if (f->vlan)
1890 filter_count++;
1891 }
1892
1893 if (!filter_count && is_netdev) {
1894 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1895 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1896 is_vf, is_netdev);
1897 if (!f) {
1898 dev_info(&vsi->back->pdev->dev,
1899 "Could not add filter %d for %pM\n",
1900 I40E_VLAN_ANY, netdev->dev_addr);
1901 return -ENOMEM;
1902 }
1903 }
1904
1905 if (!filter_count) {
1906 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1907 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1908 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1909 is_vf, is_netdev);
1910 if (!add_f) {
1911 dev_info(&vsi->back->pdev->dev,
1912 "Could not add filter %d for %pM\n",
1913 I40E_VLAN_ANY, f->macaddr);
1914 return -ENOMEM;
1915 }
1916 }
1917 }
1918
1919 return i40e_sync_vsi_filters(vsi);
1920}
1921
1922/**
1923 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1924 * @netdev: network interface to be adjusted
1925 * @vid: vlan id to be added
1926 *
1927 * net_device_ops implementation for adding vlan ids
1928 **/
1929static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1930 __always_unused __be16 proto, u16 vid)
1931{
1932 struct i40e_netdev_priv *np = netdev_priv(netdev);
1933 struct i40e_vsi *vsi = np->vsi;
1934 int ret = 0;
1935
1936 if (vid > 4095)
1937 return -EINVAL;
1938
1939 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1940
1941 /* If the network stack called us with vid = 0, we should
1942 * indicate to i40e_vsi_add_vlan() that we want to receive
1943 * any traffic (i.e. with any vlan tag, or untagged)
1944 */
1945 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1946
1947 if (!ret && (vid < VLAN_N_VID))
1948 set_bit(vid, vsi->active_vlans);
1949
1950 return ret;
1951}
1952
1953/**
1954 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1955 * @netdev: network interface to be adjusted
1956 * @vid: vlan id to be removed
1957 *
1958 * net_device_ops implementation for removing vlan ids
1959 **/
1960static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1961 __always_unused __be16 proto, u16 vid)
1962{
1963 struct i40e_netdev_priv *np = netdev_priv(netdev);
1964 struct i40e_vsi *vsi = np->vsi;
1965
1966 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1967
1968 /* return code is ignored as there is nothing a user
1969 * can do about failure to remove and a log message was
1970 * already printed from the other function
1971 */
1972 i40e_vsi_kill_vlan(vsi, vid);
1973
1974 clear_bit(vid, vsi->active_vlans);
1975
1976 return 0;
1977}
1978
1979/**
1980 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
1981 * @vsi: the vsi being brought back up
1982 **/
1983static void i40e_restore_vlan(struct i40e_vsi *vsi)
1984{
1985 u16 vid;
1986
1987 if (!vsi->netdev)
1988 return;
1989
1990 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
1991
1992 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
1993 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
1994 vid);
1995}
1996
1997/**
1998 * i40e_vsi_add_pvid - Add pvid for the VSI
1999 * @vsi: the vsi being adjusted
2000 * @vid: the vlan id to set as a PVID
2001 **/
2002int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2003{
2004 struct i40e_vsi_context ctxt;
2005 i40e_status aq_ret;
2006
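 /* program a port VLAN: INSERT_PVID asks the hardware to insert the
  * PVID on transmit and MODE_UNTAGGED limits the VSI to untagged
  * frames (semantics inferred from the AQ flag names used below)
  */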
2007 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2008 vsi->info.pvid = cpu_to_le16(vid);
2009 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
2010 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2011
2012 ctxt.seid = vsi->seid;
2013 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2014 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2015 if (aq_ret) {
2016 dev_info(&vsi->back->pdev->dev,
2017 "%s: update vsi failed, aq_err=%d\n",
2018 __func__, vsi->back->hw.aq.asq_last_status);
2019 return -ENOENT;
2020 }
2021
2022 return 0;
2023}
2024
2025/**
2026 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2027 * @vsi: the vsi being adjusted
2028 *
2029 * Just use the vlan_rx_register() service to put it back to normal
2030 **/
2031void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2032{
2033 vsi->info.pvid = 0;
2034 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2035}
2036
2037/**
2038 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2039 * @vsi: ptr to the VSI
2040 *
2041 * If this function returns with an error, then it's possible one or
2042 * more of the rings is populated (while the rest are not). It is the
2043 * callers duty to clean those orphaned rings.
2044 *
2045 * Return 0 on success, negative on failure
2046 **/
2047static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2048{
2049 int i, err = 0;
2050
2051 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2052 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2053
2054 return err;
2055}
2056
2057/**
2058 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2059 * @vsi: ptr to the VSI
2060 *
2061 * Free VSI's transmit software resources
2062 **/
2063static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2064{
2065 int i;
2066
2067 for (i = 0; i < vsi->num_queue_pairs; i++)
2068 if (vsi->tx_rings[i]->desc)
2069 i40e_free_tx_resources(vsi->tx_rings[i]);
2070}
2071
2072/**
2073 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
2074 * @vsi: ptr to the VSI
2075 *
2076 * If this function returns with an error, then it's possible one or
2077 * more of the rings is populated (while the rest are not). It is the
2078 * callers duty to clean those orphaned rings.
2079 *
2080 * Return 0 on success, negative on failure
2081 **/
2082static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2083{
2084 int i, err = 0;
2085
2086 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2087 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2088 return err;
2089}
2090
2091/**
2092 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2093 * @vsi: ptr to the VSI
2094 *
2095 * Free all receive software resources
2096 **/
2097static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2098{
2099 int i;
2100
2101 for (i = 0; i < vsi->num_queue_pairs; i++)
2102 if (vsi->rx_rings[i]->desc)
2103 i40e_free_rx_resources(vsi->rx_rings[i]);
2104}
2105
2106/**
2107 * i40e_configure_tx_ring - Configure a transmit ring context
2108 * @ring: The Tx ring to configure
2109 *
2110 * Configure the Tx descriptor ring in the HMC context.
2111 **/
2112static int i40e_configure_tx_ring(struct i40e_ring *ring)
2113{
2114 struct i40e_vsi *vsi = ring->vsi;
2115 u16 pf_q = vsi->base_queue + ring->queue_index;
2116 struct i40e_hw *hw = &vsi->back->hw;
2117 struct i40e_hmc_obj_txq tx_ctx;
2118 i40e_status err = 0;
2119 u32 qtx_ctl = 0;
2120
2121 /* some ATR related tx ring init */
2122 if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
2123 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2124 ring->atr_count = 0;
2125 } else {
2126 ring->atr_sample_rate = 0;
2127 }
2128
2129 /* initialize XPS */
2130 if (ring->q_vector && ring->netdev &&
2131 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2132 netif_set_xps_queue(ring->netdev,
2133 &ring->q_vector->affinity_mask,
2134 ring->queue_index);
2135
2136 /* clear the context structure first */
2137 memset(&tx_ctx, 0, sizeof(tx_ctx));
2138
2139 tx_ctx.new_context = 1;
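 /* the HMC queue context takes the ring base address in 128-byte units */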
2140 tx_ctx.base = (ring->dma / 128);
2141 tx_ctx.qlen = ring->count;
2142 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
2143 I40E_FLAG_FDIR_ATR_ENABLED));
2144
2145 /* As part of VSI creation/update, FW allocates certain
2146 * Tx arbitration queue sets for each TC enabled for
2147 * the VSI. The FW returns the handles to these queue
2148 * sets as part of the response buffer to Add VSI,
2149 * Update VSI, etc. AQ commands. It is expected that
2150 * these queue set handles be associated with the Tx
2151 * queues by the driver as part of the TX queue context
2152 * initialization. This has to be done regardless of
2153 * DCB as by default everything is mapped to TC0.
2154 */
2155 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2156 tx_ctx.rdylist_act = 0;
2157
2158 /* clear the context in the HMC */
2159 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2160 if (err) {
2161 dev_info(&vsi->back->pdev->dev,
2162 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2163 ring->queue_index, pf_q, err);
2164 return -ENOMEM;
2165 }
2166
2167 /* set the context in the HMC */
2168 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2169 if (err) {
2170 dev_info(&vsi->back->pdev->dev,
2171 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2172 ring->queue_index, pf_q, err);
2173 return -ENOMEM;
2174 }
2175
2176 /* Now associate this queue with this PCI function */
2177 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2178 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2179 I40E_QTX_CTL_PF_INDX_MASK);
2180 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2181 i40e_flush(hw);
2182
2183 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2184
2185 /* cache the tail register address for easier writes later */
2186 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2187
2188 return 0;
2189}
2190
2191/**
2192 * i40e_configure_rx_ring - Configure a receive ring context
2193 * @ring: The Rx ring to configure
2194 *
2195 * Configure the Rx descriptor ring in the HMC context.
2196 **/
2197static int i40e_configure_rx_ring(struct i40e_ring *ring)
2198{
2199 struct i40e_vsi *vsi = ring->vsi;
2200 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2201 u16 pf_q = vsi->base_queue + ring->queue_index;
2202 struct i40e_hw *hw = &vsi->back->hw;
2203 struct i40e_hmc_obj_rxq rx_ctx;
2204 i40e_status err = 0;
2205
2206 ring->state = 0;
2207
2208 /* clear the context structure first */
2209 memset(&rx_ctx, 0, sizeof(rx_ctx));
2210
2211 ring->rx_buf_len = vsi->rx_buf_len;
2212 ring->rx_hdr_len = vsi->rx_hdr_len;
2213
2214 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2215 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2216
2217 rx_ctx.base = (ring->dma / 128);
2218 rx_ctx.qlen = ring->count;
2219
2220 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2221 set_ring_16byte_desc_enabled(ring);
2222 rx_ctx.dsize = 0;
2223 } else {
2224 rx_ctx.dsize = 1;
2225 }
2226
2227 rx_ctx.dtype = vsi->dtype;
2228 if (vsi->dtype) {
2229 set_ring_ps_enabled(ring);
2230 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2231 I40E_RX_SPLIT_IP |
2232 I40E_RX_SPLIT_TCP_UDP |
2233 I40E_RX_SPLIT_SCTP;
2234 } else {
2235 rx_ctx.hsplit_0 = 0;
2236 }
2237
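 /* cap the max receive frame at the smaller of the VSI limit and what
  * the configured buffer chain can actually hold
  */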
2238 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2239 (chain_len * ring->rx_buf_len));
2240 rx_ctx.tphrdesc_ena = 1;
2241 rx_ctx.tphwdesc_ena = 1;
2242 rx_ctx.tphdata_ena = 1;
2243 rx_ctx.tphhead_ena = 1;
2244 if (hw->revision_id == 0)
2245 rx_ctx.lrxqthresh = 0;
2246 else
2247 rx_ctx.lrxqthresh = 2;
2248 rx_ctx.crcstrip = 1;
2249 rx_ctx.l2tsel = 1;
2250 rx_ctx.showiv = 1;
2251
2252 /* clear the context in the HMC */
2253 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2254 if (err) {
2255 dev_info(&vsi->back->pdev->dev,
2256 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2257 ring->queue_index, pf_q, err);
2258 return -ENOMEM;
2259 }
2260
2261 /* set the context in the HMC */
2262 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2263 if (err) {
2264 dev_info(&vsi->back->pdev->dev,
2265 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2266 ring->queue_index, pf_q, err);
2267 return -ENOMEM;
2268 }
2269
2270 /* cache tail for quicker writes, and clear the reg before use */
2271 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2272 writel(0, ring->tail);
2273
2274 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2275
2276 return 0;
2277}
2278
2279/**
2280 * i40e_vsi_configure_tx - Configure the VSI for Tx
2281 * @vsi: VSI structure describing this set of rings and resources
2282 *
2283 * Configure the Tx VSI for operation.
2284 **/
2285static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2286{
2287 int err = 0;
2288 u16 i;
2289
2290 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2291 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2292
2293 return err;
2294}
2295
2296/**
2297 * i40e_vsi_configure_rx - Configure the VSI for Rx
2298 * @vsi: the VSI being configured
2299 *
2300 * Configure the Rx VSI for operation.
2301 **/
2302static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2303{
2304 int err = 0;
2305 u16 i;
2306
2307 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2308 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2309 + ETH_FCS_LEN + VLAN_HLEN;
2310 else
2311 vsi->max_frame = I40E_RXBUFFER_2048;
2312
2313 /* figure out correct receive buffer length */
2314 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2315 I40E_FLAG_RX_PS_ENABLED)) {
2316 case I40E_FLAG_RX_1BUF_ENABLED:
2317 vsi->rx_hdr_len = 0;
2318 vsi->rx_buf_len = vsi->max_frame;
2319 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2320 break;
2321 case I40E_FLAG_RX_PS_ENABLED:
2322 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2323 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2324 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2325 break;
2326 default:
2327 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2328 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2329 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2330 break;
2331 }
2332
2333 /* round up for the chip's needs */
2334 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2335 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2336 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2337 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2338
2339 /* set up individual rings */
2340 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2341 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2342
2343 return err;
2344}
2345
2346/**
2347 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2348 * @vsi: ptr to the VSI
2349 **/
2350static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2351{
2352 u16 qoffset, qcount;
2353 int i, n;
2354
2355 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2356 return;
2357
2358 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2359 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2360 continue;
2361
2362 qoffset = vsi->tc_config.tc_info[n].qoffset;
2363 qcount = vsi->tc_config.tc_info[n].qcount;
2364 for (i = qoffset; i < (qoffset + qcount); i++) {
2365 struct i40e_ring *rx_ring = vsi->rx_rings[i];
2366 struct i40e_ring *tx_ring = vsi->tx_rings[i];
2367 rx_ring->dcb_tc = n;
2368 tx_ring->dcb_tc = n;
2369 }
2370 }
2371}
2372
2373/**
2374 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2375 * @vsi: ptr to the VSI
2376 **/
2377static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2378{
2379 if (vsi->netdev)
2380 i40e_set_rx_mode(vsi->netdev);
2381}
2382
2383/**
2384 * i40e_vsi_configure - Set up the VSI for action
2385 * @vsi: the VSI being configured
2386 **/
2387static int i40e_vsi_configure(struct i40e_vsi *vsi)
2388{
2389 int err;
2390
2391 i40e_set_vsi_rx_mode(vsi);
2392 i40e_restore_vlan(vsi);
2393 i40e_vsi_config_dcb_rings(vsi);
2394 err = i40e_vsi_configure_tx(vsi);
2395 if (!err)
2396 err = i40e_vsi_configure_rx(vsi);
2397
2398 return err;
2399}
2400
2401/**
2402 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2403 * @vsi: the VSI being configured
2404 **/
2405static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2406{
2407 struct i40e_pf *pf = vsi->back;
2408 struct i40e_q_vector *q_vector;
2409 struct i40e_hw *hw = &pf->hw;
2410 u16 vector;
2411 int i, q;
2412 u32 val;
2413 u32 qp;
2414
2415 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2416 * and PFINT_LNKLSTn registers, e.g.:
2417 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2418 */
2419 qp = vsi->base_queue;
2420 vector = vsi->base_vector;
2421 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2422 q_vector = vsi->q_vectors[i];
2423 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2424 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2425 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2426 q_vector->rx.itr);
2427 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2428 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2429 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2430 q_vector->tx.itr);
2431
2432 /* Linked list for the queuepairs assigned to this vector */
2433 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2434 for (q = 0; q < q_vector->num_ringpairs; q++) {
2435 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2436 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2437 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2438 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2439 (I40E_QUEUE_TYPE_TX
2440 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2441
2442 wr32(hw, I40E_QINT_RQCTL(qp), val);
2443
2444 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2445 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2446 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2447 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2448 (I40E_QUEUE_TYPE_RX
2449 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2450
2451 /* Terminate the linked list */
2452 if (q == (q_vector->num_ringpairs - 1))
2453 val |= (I40E_QUEUE_END_OF_LIST
2454 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2455
2456 wr32(hw, I40E_QINT_TQCTL(qp), val);
2457 qp++;
2458 }
2459 }
2460
2461 i40e_flush(hw);
2462}
2463
2464/**
2465 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2466 * @hw: ptr to the hardware info
2467 **/
2468static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2469{
2470 u32 val;
2471
2472 /* clear things first */
2473 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2474 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2475
2476 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2477 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2478 I40E_PFINT_ICR0_ENA_GRST_MASK |
2479 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2480 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2481 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2482 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2483 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2484 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2485
2486 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2487
2488 /* SW_ITR_IDX = 0, but don't change INTENA */
2489 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
2490 I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
2491
2492 /* OTHER_ITR_IDX = 0 */
2493 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2494}
2495
2496/**
2497 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2498 * @vsi: the VSI being configured
2499 **/
2500static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2501{
2502 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2503 struct i40e_pf *pf = vsi->back;
2504 struct i40e_hw *hw = &pf->hw;
2505 u32 val;
2506
2507 /* set the ITR configuration */
2508 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2509 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2510 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2511 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2512 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2513 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2514
2515 i40e_enable_misc_int_causes(hw);
2516
2517 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2518 wr32(hw, I40E_PFINT_LNKLST0, 0);
2519
2520 /* Associate the queue pair to the vector and enable the q int */
2521 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2522 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2523 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2524
2525 wr32(hw, I40E_QINT_RQCTL(0), val);
2526
2527 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2528 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2529 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2530
2531 wr32(hw, I40E_QINT_TQCTL(0), val);
2532 i40e_flush(hw);
2533}
2534
2535/**
2536 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2537 * @pf: board private structure
2538 **/
2539void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2540{
2541 struct i40e_hw *hw = &pf->hw;
2542 u32 val;
2543
2544 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2545 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2546 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2547
2548 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2549 i40e_flush(hw);
2550}
2551
2552/**
2553 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2554 * @vsi: pointer to a vsi
2555 * @vector: enable a particular Hw Interrupt vector
2556 **/
2557void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2558{
2559 struct i40e_pf *pf = vsi->back;
2560 struct i40e_hw *hw = &pf->hw;
2561 u32 val;
2562
2563 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2564 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2565 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2566 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2567 /* skip the flush */
2568}
2569
2570/**
2571 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2572 * @irq: interrupt number
2573 * @data: pointer to a q_vector
2574 **/
2575static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2576{
2577 struct i40e_q_vector *q_vector = data;
2578
2579 if (!q_vector->tx.ring && !q_vector->rx.ring)
2580 return IRQ_HANDLED;
2581
2582 napi_schedule(&q_vector->napi);
2583
2584 return IRQ_HANDLED;
2585}
2586
2587/**
2588 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
2589 * @irq: interrupt number
2590 * @data: pointer to a q_vector
2591 **/
2592static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2593{
2594 struct i40e_q_vector *q_vector = data;
2595
2596 if (!q_vector->tx.ring && !q_vector->rx.ring)
2597 return IRQ_HANDLED;
2598
2599 pr_info("fdir ring cleaning needed\n");
2600
2601 return IRQ_HANDLED;
2602}
2603
2604/**
2605 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2606 * @vsi: the VSI being configured
2607 * @basename: name for the vector
2608 *
2609 * Requests interrupts from the kernel for the VSI's MSI-X vectors.
2610 **/
2611static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2612{
2613 int q_vectors = vsi->num_q_vectors;
2614 struct i40e_pf *pf = vsi->back;
2615 int base = vsi->base_vector;
2616 int rx_int_idx = 0;
2617 int tx_int_idx = 0;
2618 int vector, err;
2619
2620 for (vector = 0; vector < q_vectors; vector++) {
2621 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2622
2623 if (q_vector->tx.ring && q_vector->rx.ring) {
2624 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2625 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2626 tx_int_idx++;
2627 } else if (q_vector->rx.ring) {
2628 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2629 "%s-%s-%d", basename, "rx", rx_int_idx++);
2630 } else if (q_vector->tx.ring) {
2631 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2632 "%s-%s-%d", basename, "tx", tx_int_idx++);
2633 } else {
2634 /* skip this unused q_vector */
2635 continue;
2636 }
2637 err = request_irq(pf->msix_entries[base + vector].vector,
2638 vsi->irq_handler,
2639 0,
2640 q_vector->name,
2641 q_vector);
2642 if (err) {
2643 dev_info(&pf->pdev->dev,
2644 "%s: request_irq failed, error: %d\n",
2645 __func__, err);
2646 goto free_queue_irqs;
2647 }
2648 /* assign the mask for this irq */
2649 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2650 &q_vector->affinity_mask);
2651 }
2652
2653 return 0;
2654
2655free_queue_irqs:
2656 while (vector) {
2657 vector--;
2658 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2659 NULL);
2660 free_irq(pf->msix_entries[base + vector].vector,
2661 vsi->q_vectors[vector]);
2662 }
2663 return err;
2664}
2665
2666/**
2667 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2668 * @vsi: the VSI being un-configured
2669 **/
2670static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2671{
2672 struct i40e_pf *pf = vsi->back;
2673 struct i40e_hw *hw = &pf->hw;
2674 int base = vsi->base_vector;
2675 int i;
2676
2677 for (i = 0; i < vsi->num_queue_pairs; i++) {
2678 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2679 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2680 }
2681
2682 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2683 for (i = vsi->base_vector;
2684 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2685 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2686
2687 i40e_flush(hw);
2688 for (i = 0; i < vsi->num_q_vectors; i++)
2689 synchronize_irq(pf->msix_entries[i + base].vector);
2690 } else {
2691 /* Legacy and MSI mode - this stops all interrupt handling */
2692 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2693 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2694 i40e_flush(hw);
2695 synchronize_irq(pf->pdev->irq);
2696 }
2697}
2698
2699/**
2700 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2701 * @vsi: the VSI being configured
2702 **/
2703static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2704{
2705 struct i40e_pf *pf = vsi->back;
2706 int i;
2707
2708 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2709 for (i = vsi->base_vector;
2710 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2711 i40e_irq_dynamic_enable(vsi, i);
2712 } else {
2713 i40e_irq_dynamic_enable_icr0(pf);
2714 }
2715
2716 i40e_flush(&pf->hw);
2717 return 0;
2718}
2719
2720/**
2721 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2722 * @pf: board private structure
2723 **/
2724static void i40e_stop_misc_vector(struct i40e_pf *pf)
2725{
2726 /* Disable ICR 0 */
2727 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2728 i40e_flush(&pf->hw);
2729}
2730
2731/**
2732 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2733 * @irq: interrupt number
2734 * @data: pointer to a q_vector
2735 *
2736 * This is the handler used for all MSI/Legacy interrupts, and deals
2737 * with both queue and non-queue interrupts. This is also used in
2738 * MSIX mode to handle the non-queue interrupts.
2739 **/
2740static irqreturn_t i40e_intr(int irq, void *data)
2741{
2742 struct i40e_pf *pf = (struct i40e_pf *)data;
2743 struct i40e_hw *hw = &pf->hw;
2744 u32 icr0, icr0_remaining;
2745 u32 val, ena_mask;
2746
2747 icr0 = rd32(hw, I40E_PFINT_ICR0);
2748
2749 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2750 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2751 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2752
2753 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2754 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2755 return IRQ_NONE;
2756
2757 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2758
2759 /* if interrupt but no bits showing, must be SWINT */
2760 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
2761 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
2762 pf->sw_int_count++;
2763
2764 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2765 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2766
2767 /* temporarily disable queue cause for NAPI processing */
2768 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2769 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2770 wr32(hw, I40E_QINT_RQCTL(0), qval);
2771
2772 qval = rd32(hw, I40E_QINT_TQCTL(0));
2773 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2774 wr32(hw, I40E_QINT_TQCTL(0), qval);
2775
2776 if (!test_bit(__I40E_DOWN, &pf->state))
2777 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2778 }
2779
2780 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2781 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2782 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2783 }
2784
2785 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2786 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2787 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2788 }
2789
2790 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2791 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2792 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2793 }
2794
2795 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2796 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2797 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2798 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
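 /* note which reset type (CORER/GLOBR/EMPR) the hardware reported */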
2799 val = rd32(hw, I40E_GLGEN_RSTAT);
2800 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2801 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2802 if (val == I40E_RESET_CORER)
2803 pf->corer_count++;
2804 else if (val == I40E_RESET_GLOBR)
2805 pf->globr_count++;
2806 else if (val == I40E_RESET_EMPR)
2807 pf->empr_count++;
2808 }
2809
2810 /* If a critical error is pending we have no choice but to reset the
2811 * device.
2812 * Report and mask out any remaining unexpected interrupts.
2813 */
2814 icr0_remaining = icr0 & ena_mask;
2815 if (icr0_remaining) {
2816 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2817 icr0_remaining);
2818 if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
2819 (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2820 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2821 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2822 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2823 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2824 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2825 } else {
2826 dev_info(&pf->pdev->dev, "device will be reset\n");
2827 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2828 i40e_service_event_schedule(pf);
2829 }
2830 }
2831 ena_mask &= ~icr0_remaining;
2832 }
2833
2834 /* re-enable interrupt causes */
2835 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2836 if (!test_bit(__I40E_DOWN, &pf->state)) {
2837 i40e_service_event_schedule(pf);
2838 i40e_irq_dynamic_enable_icr0(pf);
2839 }
2840
2841 return IRQ_HANDLED;
2842}
2843
2844/**
2845 * map_vector_to_qp - Assigns the queue pair to the vector
2846 * @vsi: the VSI being configured
2847 * @v_idx: vector index
2848 * @qp_idx: queue pair index
2849 **/
2850static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
2851{
2852 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
2853 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
2854 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
2855
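 /* push this queue pair's rings onto the head of the vector's
  * singly linked Tx and Rx ring lists
  */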
2856 tx_ring->q_vector = q_vector;
2857 tx_ring->next = q_vector->tx.ring;
2858 q_vector->tx.ring = tx_ring;
2859 q_vector->tx.count++;
2860
2861 rx_ring->q_vector = q_vector;
2862 rx_ring->next = q_vector->rx.ring;
2863 q_vector->rx.ring = rx_ring;
2864 q_vector->rx.count++;
2865}
2866
2867/**
2868 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
2869 * @vsi: the VSI being configured
2870 *
2871 * This function maps descriptor rings to the queue-specific vectors
2872 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2873 * one vector per queue pair, but on a constrained vector budget, we
2874 * group the queue pairs as "efficiently" as possible.
2875 **/
2876static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2877{
2878 int qp_remaining = vsi->num_queue_pairs;
2879 int q_vectors = vsi->num_q_vectors;
2880 int num_ringpairs;
2881 int v_start = 0;
2882 int qp_idx = 0;
2883
2884 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2885 * group them so there are multiple queues per vector.
2886 */
2887 for (; v_start < q_vectors && qp_remaining; v_start++) {
2888 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
2889
2890 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
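 /* e.g. 10 queue pairs over 4 vectors splits as 3/3/2/2 */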
2891
2892 q_vector->num_ringpairs = num_ringpairs;
2893
2894 q_vector->rx.count = 0;
2895 q_vector->tx.count = 0;
2896 q_vector->rx.ring = NULL;
2897 q_vector->tx.ring = NULL;
2898
2899 while (num_ringpairs--) {
2900 map_vector_to_qp(vsi, v_start, qp_idx);
2901 qp_idx++;
2902 qp_remaining--;
2903 }
2904 }
2905}
2906
2907/**
2908 * i40e_vsi_request_irq - Request IRQ from the OS
2909 * @vsi: the VSI being configured
2910 * @basename: name for the vector
2911 **/
2912static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2913{
2914 struct i40e_pf *pf = vsi->back;
2915 int err;
2916
2917 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2918 err = i40e_vsi_request_irq_msix(vsi, basename);
2919 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2920 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2921 pf->misc_int_name, pf);
2922 else
2923 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2924 pf->misc_int_name, pf);
2925
2926 if (err)
2927 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2928
2929 return err;
2930}
2931
2932#ifdef CONFIG_NET_POLL_CONTROLLER
2933/**
2934 * i40e_netpoll - A Polling 'interrupt' handler
2935 * @netdev: network interface device structure
2936 *
2937 * This is used by netconsole to send skbs without having to re-enable
2938 * interrupts. It's not called while the normal interrupt routine is executing.
2939 **/
2940static void i40e_netpoll(struct net_device *netdev)
2941{
2942 struct i40e_netdev_priv *np = netdev_priv(netdev);
2943 struct i40e_vsi *vsi = np->vsi;
2944 struct i40e_pf *pf = vsi->back;
2945 int i;
2946
2947 /* if interface is down do nothing */
2948 if (test_bit(__I40E_DOWN, &vsi->state))
2949 return;
2950
2951 pf->flags |= I40E_FLAG_IN_NETPOLL;
2952 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2953 for (i = 0; i < vsi->num_q_vectors; i++)
2954 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
2955 } else {
2956 i40e_intr(pf->pdev->irq, netdev);
2957 }
2958 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2959}
2960#endif
2961
2962/**
2963 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
2964 * @vsi: the VSI being configured
2965 * @enable: start or stop the rings
2966 **/
2967static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
2968{
2969 struct i40e_pf *pf = vsi->back;
2970 struct i40e_hw *hw = &pf->hw;
2971 int i, j, pf_q;
2972 u32 tx_reg;
2973
2974 pf_q = vsi->base_queue;
2975 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
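 /* wait for a previous enable/disable request to settle: poll until
  * the QENA_REQ and QENA_STAT bits agree
  */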
2976 j = 1000;
2977 do {
2978 usleep_range(1000, 2000);
2979 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
2980 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
2981 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
2982
2983 if (enable) {
2984 /* is STAT set ? */
2985 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2986 dev_info(&pf->pdev->dev,
2987 "Tx %d already enabled\n", i);
2988 continue;
2989 }
2990 } else {
2991 /* is !STAT set ? */
2992 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
2993 dev_info(&pf->pdev->dev,
2994 "Tx %d already disabled\n", i);
2995 continue;
2996 }
2997 }
2998
2999 /* turn on/off the queue */
3000 if (enable)
3001 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3002 I40E_QTX_ENA_QENA_STAT_MASK;
3003 else
3004 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3005
3006 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3007
3008 /* wait for the change to finish */
3009 for (j = 0; j < 10; j++) {
3010 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3011 if (enable) {
3012 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3013 break;
3014 } else {
3015 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3016 break;
3017 }
3018
3019 udelay(10);
3020 }
3021 if (j >= 10) {
3022 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
3023 pf_q, (enable ? "en" : "dis"));
3024 return -ETIMEDOUT;
3025 }
3026 }
3027
3028 if (hw->revision_id == 0)
3029 mdelay(50);
3030
3031 return 0;
3032}
3033
3034/**
3035 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3036 * @vsi: the VSI being configured
3037 * @enable: start or stop the rings
3038 **/
3039static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3040{
3041 struct i40e_pf *pf = vsi->back;
3042 struct i40e_hw *hw = &pf->hw;
3043 int i, j, pf_q;
3044 u32 rx_reg;
3045
3046 pf_q = vsi->base_queue;
3047 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3048 j = 1000;
3049 do {
3050 usleep_range(1000, 2000);
3051 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3052 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
3053 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
3054
3055 if (enable) {
3056 /* is STAT set ? */
3057 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3058 continue;
3059 } else {
3060 /* is !STAT set ? */
3061 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3062 continue;
3063 }
3064
3065 /* turn on/off the queue */
3066 if (enable)
3067 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3068 I40E_QRX_ENA_QENA_STAT_MASK;
3069 else
3070 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
3071 I40E_QRX_ENA_QENA_STAT_MASK);
3072 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3073
3074 /* wait for the change to finish */
3075 for (j = 0; j < 10; j++) {
3076 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3077
3078 if (enable) {
3079 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3080 break;
3081 } else {
3082 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3083 break;
3084 }
3085
3086 udelay(10);
3087 }
3088 if (j >= 10) {
3089 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3090 pf_q, (enable ? "en" : "dis"));
3091 return -ETIMEDOUT;
3092 }
3093 }
3094
3095 return 0;
3096}
3097
3098/**
3099 * i40e_vsi_control_rings - Start or stop a VSI's rings
3100 * @vsi: the VSI being configured
3101 * @request: true to start the rings, false to stop them
3102 **/
3103static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3104{
3105 int ret;
3106
3107 /* do rx first for enable and last for disable */
3108 if (request) {
3109 ret = i40e_vsi_control_rx(vsi, request);
3110 if (ret)
3111 return ret;
3112 ret = i40e_vsi_control_tx(vsi, request);
3113 } else {
3114 ret = i40e_vsi_control_tx(vsi, request);
3115 if (ret)
3116 return ret;
3117 ret = i40e_vsi_control_rx(vsi, request);
3118 }
3119
3120 return ret;
3121}
3122
3123/**
3124 * i40e_vsi_free_irq - Free the irq association with the OS
3125 * @vsi: the VSI being configured
3126 **/
3127static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3128{
3129 struct i40e_pf *pf = vsi->back;
3130 struct i40e_hw *hw = &pf->hw;
3131 int base = vsi->base_vector;
3132 u32 val, qp;
3133 int i;
3134
3135 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3136 if (!vsi->q_vectors)
3137 return;
3138
3139 for (i = 0; i < vsi->num_q_vectors; i++) {
3140 u16 vector = i + base;
3141
3142 /* free only the irqs that were actually requested */
3143 if (vsi->q_vectors[i]->num_ringpairs == 0)
3144 continue;
3145
3146 /* clear the affinity_mask in the IRQ descriptor */
3147 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3148 NULL);
3149 free_irq(pf->msix_entries[vector].vector,
3150 vsi->q_vectors[i]);
3151
3152 /* Tear down the interrupt queue link list
3153 *
3154 * We know that they come in pairs and always
3155 * the Rx first, then the Tx. To clear the
3156 * link list, stick the EOL value into the
3157 * next_q field of the registers.
3158 */
3159 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3160 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3161 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3162 val |= I40E_QUEUE_END_OF_LIST
3163 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3164 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3165
3166 while (qp != I40E_QUEUE_END_OF_LIST) {
3167 u32 next;
3168
3169 val = rd32(hw, I40E_QINT_RQCTL(qp));
3170
3171 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3172 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3173 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3174 I40E_QINT_RQCTL_INTEVENT_MASK);
3175
3176 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3177 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3178
3179 wr32(hw, I40E_QINT_RQCTL(qp), val);
3180
3181 val = rd32(hw, I40E_QINT_TQCTL(qp));
3182
3183 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3184 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3185
3186 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3187 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3188 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3189 I40E_QINT_TQCTL_INTEVENT_MASK);
3190
3191 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3192 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3193
3194 wr32(hw, I40E_QINT_TQCTL(qp), val);
3195 qp = next;
3196 }
3197 }
3198 } else {
3199 free_irq(pf->pdev->irq, pf);
3200
3201 val = rd32(hw, I40E_PFINT_LNKLST0);
3202 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3203 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3204 val |= I40E_QUEUE_END_OF_LIST
3205 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3206 wr32(hw, I40E_PFINT_LNKLST0, val);
3207
3208 val = rd32(hw, I40E_QINT_RQCTL(qp));
3209 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3210 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3211 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3212 I40E_QINT_RQCTL_INTEVENT_MASK);
3213
3214 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3215 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3216
3217 wr32(hw, I40E_QINT_RQCTL(qp), val);
3218
3219 val = rd32(hw, I40E_QINT_TQCTL(qp));
3220
3221 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3222 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3223 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3224 I40E_QINT_TQCTL_INTEVENT_MASK);
3225
3226 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3227 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3228
3229 wr32(hw, I40E_QINT_TQCTL(qp), val);
3230 }
3231}
3232
3233/**
3234 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3235 * @vsi: the VSI being configured
3236 * @v_idx: Index of vector to be freed
3237 *
3238 * This function frees the memory allocated to the q_vector. In addition if
3239 * NAPI is enabled it will delete any references to the NAPI struct prior
3240 * to freeing the q_vector.
3241 **/
3242static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3243{
3244 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3245 struct i40e_ring *ring;
3246
3247 if (!q_vector)
3248 return;
3249
3250 /* disassociate q_vector from rings */
3251 i40e_for_each_ring(ring, q_vector->tx)
3252 ring->q_vector = NULL;
3253
3254 i40e_for_each_ring(ring, q_vector->rx)
3255 ring->q_vector = NULL;
3256
3257 /* only VSI w/ an associated netdev is set up w/ NAPI */
3258 if (vsi->netdev)
3259 netif_napi_del(&q_vector->napi);
3260
3261 vsi->q_vectors[v_idx] = NULL;
3262
3263 kfree_rcu(q_vector, rcu);
3264}
3265
3266/**
3267 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3268 * @vsi: the VSI being un-configured
3269 *
3270 * This frees the memory allocated to the q_vectors and
3271 * deletes references to the NAPI struct.
3272 **/
3273static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3274{
3275 int v_idx;
3276
3277 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3278 i40e_free_q_vector(vsi, v_idx);
3279}
3280
3281/**
3282 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3283 * @pf: board private structure
3284 **/
3285static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3286{
3287 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3288 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3289 pci_disable_msix(pf->pdev);
3290 kfree(pf->msix_entries);
3291 pf->msix_entries = NULL;
3292 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3293 pci_disable_msi(pf->pdev);
3294 }
3295 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3296}
3297
3298/**
3299 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3300 * @pf: board private structure
3301 *
3302 * We go through and clear interrupt specific resources and reset the structure
3303 * to pre-load conditions
3304 **/
3305static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3306{
3307 int i;
3308
3309 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3310 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3311 if (pf->vsi[i])
3312 i40e_vsi_free_q_vectors(pf->vsi[i]);
3313 i40e_reset_interrupt_capability(pf);
3314}
3315
3316/**
3317 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3318 * @vsi: the VSI being configured
3319 **/
3320static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3321{
3322 int q_idx;
3323
3324 if (!vsi->netdev)
3325 return;
3326
3327 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3328 napi_enable(&vsi->q_vectors[q_idx]->napi);
3329}
3330
3331/**
3332 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3333 * @vsi: the VSI being configured
3334 **/
3335static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3336{
3337 int q_idx;
3338
3339 if (!vsi->netdev)
3340 return;
3341
3342 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3343 napi_disable(&vsi->q_vectors[q_idx]->napi);
3344}
3345
3346/**
3347 * i40e_quiesce_vsi - Pause a given VSI
3348 * @vsi: the VSI being paused
3349 **/
3350static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3351{
3352 if (test_bit(__I40E_DOWN, &vsi->state))
3353 return;
3354
3355 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3356 if (vsi->netdev && netif_running(vsi->netdev)) {
3357 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3358 } else {
3359 set_bit(__I40E_DOWN, &vsi->state);
3360 i40e_down(vsi);
3361 }
3362}
3363
3364/**
3365 * i40e_unquiesce_vsi - Resume a given VSI
3366 * @vsi: the VSI being resumed
3367 **/
3368static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3369{
3370 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3371 return;
3372
3373 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3374 if (vsi->netdev && netif_running(vsi->netdev))
3375 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3376 else
3377 i40e_up(vsi); /* this clears the DOWN bit */
3378}
3379
3380/**
3381 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3382 * @pf: the PF
3383 **/
3384static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3385{
3386 int v;
3387
3388 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3389 if (pf->vsi[v])
3390 i40e_quiesce_vsi(pf->vsi[v]);
3391 }
3392}
3393
3394/**
3395 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3396 * @pf: the PF
3397 **/
3398static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3399{
3400 int v;
3401
3402 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3403 if (pf->vsi[v])
3404 i40e_unquiesce_vsi(pf->vsi[v]);
3405 }
3406}
3407
3408/**
3409 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3410 * @dcbcfg: the corresponding DCBx configuration structure
3411 *
3412 * Return the number of TCs from given DCBx configuration
3413 **/
3414static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3415{
3416 u8 num_tc = 0;
3417 int i;
3418
3419 /* Scan the ETS Config Priority Table to find
3420 * traffic class enabled for a given priority
3421 * and use the traffic class index to get the
3422 * number of traffic classes enabled
3423 */
3424 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3425 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3426 num_tc = dcbcfg->etscfg.prioritytable[i];
3427 }
3428
3429 /* Traffic class index starts from zero so
3430 * increment to return the actual count
3431 */
3432 return num_tc + 1;
3433}
3434
3435/**
3436 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3437 * @dcbcfg: the corresponding DCBx configuration structure
3438 *
3439 * Query the current DCB configuration and return the number of
3440 * traffic classes enabled from the given DCBX config
3441 **/
3442static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3443{
3444 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3445 u8 enabled_tc = 1;
3446 u8 i;
3447
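 /* build a contiguous bitmap with bits 0..num_tc-1 set */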
3448 for (i = 0; i < num_tc; i++)
3449 enabled_tc |= 1 << i;
3450
3451 return enabled_tc;
3452}
3453
3454/**
3455 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3456 * @pf: PF being queried
3457 *
3458 * Return number of traffic classes enabled for the given PF
3459 **/
3460static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3461{
3462 struct i40e_hw *hw = &pf->hw;
3463 u8 i, enabled_tc;
3464 u8 num_tc = 0;
3465 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3466
3467 /* If DCB is not enabled then always in single TC */
3468 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3469 return 1;
3470
3471 /* MFP mode return count of enabled TCs for this PF */
3472 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3473 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3474 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3475 if (enabled_tc & (1 << i))
3476 num_tc++;
3477 }
3478 return num_tc;
3479 }
3480
3481 /* SFP mode will be enabled for all TCs on port */
3482 return i40e_dcb_get_num_tc(dcbcfg);
3483}
3484
3485/**
3486 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3487 * @pf: PF being queried
3488 *
3489 * Return a bitmap for first enabled traffic class for this PF.
3490 **/
3491static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3492{
3493 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3494 u8 i = 0;
3495
3496 if (!enabled_tc)
3497 return 0x1; /* TC0 */
3498
3499 /* Find the first enabled TC */
3500 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3501 if (enabled_tc & (1 << i))
3502 break;
3503 }
3504
3505 return 1 << i;
3506}
3507
3508/**
3509 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3510 * @pf: PF being queried
3511 *
3512 * Return a bitmap for enabled traffic classes for this PF.
3513 **/
3514static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3515{
3516 /* If DCB is not enabled for this PF then just return default TC */
3517 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3518 return i40e_pf_get_default_tc(pf);
3519
3520 /* MFP mode will have enabled TCs set by FW */
3521 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3522 return pf->hw.func_caps.enabled_tcmap;
3523
3524 /* SFP mode we want PF to be enabled for all TCs */
3525 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3526}
3527
3528/**
3529 * i40e_vsi_get_bw_info - Query VSI BW Information
3530 * @vsi: the VSI being queried
3531 *
3532 * Returns 0 on success, negative value on failure
3533 **/
3534static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3535{
3536 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3537 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3538 struct i40e_pf *pf = vsi->back;
3539 struct i40e_hw *hw = &pf->hw;
3540 i40e_status aq_ret;
3541 u32 tc_bw_max;
3542 int i;
3543
3544 /* Get the VSI level BW configuration */
3545 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3546 if (aq_ret) {
3547 dev_info(&pf->pdev->dev,
3548 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3549 aq_ret, pf->hw.aq.asq_last_status);
3550 return -EINVAL;
3551 }
3552
3553 /* Get the VSI level BW configuration per TC */
3554 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3555 NULL);
3556 if (aq_ret) {
41c445ff
JB
3557 dev_info(&pf->pdev->dev,
3558 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3559 aq_ret, pf->hw.aq.asq_last_status);
3560 return -EINVAL;
3561 }
3562
3563 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3564 dev_info(&pf->pdev->dev,
3565 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3566 bw_config.tc_valid_bits,
3567 bw_ets_config.tc_valid_bits);
3568 /* Still continuing */
3569 }
3570
3571 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3572 vsi->bw_max_quanta = bw_config.max_bw;
3573 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3574 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3575 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3576 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3577 vsi->bw_ets_limit_credits[i] =
3578 le16_to_cpu(bw_ets_config.credits[i]);
3579 /* 3 bits out of 4 for each TC */
3580 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3581 }
3582
3583 return 0;
3584}
3585
3586/**
3587 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3588 * @vsi: the VSI being configured
3589 * @enabled_tc: TC bitmap
3590 * @bw_share: BW shared credits per TC
3591 *
3592 * Returns 0 on success, negative value on failure
3593 **/
3594static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3595 u8 *bw_share)
3596{
3597 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3598 i40e_status aq_ret;
3599 int i;
3600
3601 bw_data.tc_valid_bits = enabled_tc;
3602 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3603 bw_data.tc_bw_credits[i] = bw_share[i];
3604
3605 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3606 NULL);
3607 if (aq_ret) {
41c445ff
JB
3608 dev_info(&vsi->back->pdev->dev,
3609 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3610 __func__, vsi->back->hw.aq.asq_last_status);
3611 return -EINVAL;
3612 }
3613
3614 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3615 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3616
3617 return 0;
3618}
3619
3620/**
3621 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3622 * @vsi: the VSI being configured
3623 * @enabled_tc: TC map to be enabled
3624 *
3625 **/
3626static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3627{
3628 struct net_device *netdev = vsi->netdev;
3629 struct i40e_pf *pf = vsi->back;
3630 struct i40e_hw *hw = &pf->hw;
3631 u8 netdev_tc = 0;
3632 int i;
3633 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3634
3635 if (!netdev)
3636 return;
3637
3638 if (!enabled_tc) {
3639 netdev_reset_tc(netdev);
3640 return;
3641 }
3642
3643 /* Set up actual enabled TCs on the VSI */
3644 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3645 return;
3646
3647 /* set per TC queues for the VSI */
3648 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3649 /* Only set TC queues for enabled tcs
3650 *
3651 * e.g. For a VSI that has TC0 and TC3 enabled the
3652 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
3653 * will set numtc for the netdev to 2, and the netdev
3654 * layer will reference them as TC 0 and 1.
3655 */
3656 if (vsi->tc_config.enabled_tc & (1 << i))
3657 netdev_set_tc_queue(netdev,
3658 vsi->tc_config.tc_info[i].netdev_tc,
3659 vsi->tc_config.tc_info[i].qcount,
3660 vsi->tc_config.tc_info[i].qoffset);
3661 }
3662
3663 /* Assign UP2TC map for the VSI */
3664 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3665 /* Get the actual TC# for the UP */
3666 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3667 /* Get the mapped netdev TC# for the UP */
3668 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3669 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3670 }
3671}
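
/* To make the TC numbering above concrete, here is a standalone sketch
 * (illustrative only, not driver code) of how a sparse hardware TC bitmap
 * such as TC0+TC3 collapses into consecutive netdev TC numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned char enabled_tc = 0x9;	/* TC0 and TC3 enabled */
	int netdev_tc = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (enabled_tc & (1 << i))
			printf("hw TC%d -> netdev TC%d\n", i, netdev_tc++);
	printf("numtc = %d\n", netdev_tc);	/* prints 2 */
	return 0;
}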
3672
3673/**
 3674 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
3675 * @vsi: the VSI being configured
3676 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3677 **/
3678static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3679 struct i40e_vsi_context *ctxt)
3680{
3681 /* copy just the sections touched not the entire info
3682 * since not all sections are valid as returned by
3683 * update vsi params
3684 */
3685 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3686 memcpy(&vsi->info.queue_mapping,
3687 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3688 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3689 sizeof(vsi->info.tc_mapping));
3690}
3691
3692/**
3693 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3694 * @vsi: VSI to be configured
3695 * @enabled_tc: TC bitmap
3696 *
3697 * This configures a particular VSI for TCs that are mapped to the
3698 * given TC bitmap. It uses default bandwidth share for TCs across
3699 * VSIs to configure TC for a particular VSI.
3700 *
3701 * NOTE:
 3702 * It is expected that the VSI queues have been quiesced before calling
3703 * this function.
3704 **/
3705static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3706{
3707 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3708 struct i40e_vsi_context ctxt;
3709 int ret = 0;
3710 int i;
3711
3712 /* Check if enabled_tc is same as existing or new TCs */
3713 if (vsi->tc_config.enabled_tc == enabled_tc)
3714 return ret;
3715
3716 /* Enable ETS TCs with equal BW Share for now across all VSIs */
3717 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3718 if (enabled_tc & (1 << i))
3719 bw_share[i] = 1;
3720 }
3721
3722 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3723 if (ret) {
3724 dev_info(&vsi->back->pdev->dev,
3725 "Failed configuring TC map %d for VSI %d\n",
3726 enabled_tc, vsi->seid);
3727 goto out;
3728 }
3729
3730 /* Update Queue Pairs Mapping for currently enabled UPs */
3731 ctxt.seid = vsi->seid;
3732 ctxt.pf_num = vsi->back->hw.pf_id;
3733 ctxt.vf_num = 0;
3734 ctxt.uplink_seid = vsi->uplink_seid;
3735 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3736 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3737
3738 /* Update the VSI after updating the VSI queue-mapping information */
3739 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3740 if (ret) {
3741 dev_info(&vsi->back->pdev->dev,
3742 "update vsi failed, aq_err=%d\n",
3743 vsi->back->hw.aq.asq_last_status);
3744 goto out;
3745 }
3746 /* update the local VSI info with updated queue map */
3747 i40e_vsi_update_queue_map(vsi, &ctxt);
3748 vsi->info.valid_sections = 0;
3749
3750 /* Update current VSI BW information */
3751 ret = i40e_vsi_get_bw_info(vsi);
3752 if (ret) {
3753 dev_info(&vsi->back->pdev->dev,
3754 "Failed updating vsi bw info, aq_err=%d\n",
3755 vsi->back->hw.aq.asq_last_status);
3756 goto out;
3757 }
3758
3759 /* Update the netdev TC setup */
3760 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3761out:
3762 return ret;
3763}
3764
3765/**
3766 * i40e_up_complete - Finish the last steps of bringing up a connection
3767 * @vsi: the VSI being configured
3768 **/
3769static int i40e_up_complete(struct i40e_vsi *vsi)
3770{
3771 struct i40e_pf *pf = vsi->back;
3772 int err;
3773
3774 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3775 i40e_vsi_configure_msix(vsi);
3776 else
3777 i40e_configure_msi_and_legacy(vsi);
3778
3779 /* start rings */
3780 err = i40e_vsi_control_rings(vsi, true);
3781 if (err)
3782 return err;
3783
3784 clear_bit(__I40E_DOWN, &vsi->state);
3785 i40e_napi_enable_all(vsi);
3786 i40e_vsi_enable_irq(vsi);
3787
3788 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
3789 (vsi->netdev)) {
6d779b41 3790 netdev_info(vsi->netdev, "NIC Link is Up\n");
3791 netif_tx_start_all_queues(vsi->netdev);
3792 netif_carrier_on(vsi->netdev);
3793 } else if (vsi->netdev) {
3794 netdev_info(vsi->netdev, "NIC Link is Down\n");
3795 }
3796 i40e_service_event_schedule(pf);
3797
3798 return 0;
3799}
3800
3801/**
3802 * i40e_vsi_reinit_locked - Reset the VSI
3803 * @vsi: the VSI being configured
3804 *
3805 * Rebuild the ring structs after some configuration
3806 * has changed, e.g. MTU size.
3807 **/
3808static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
3809{
3810 struct i40e_pf *pf = vsi->back;
3811
3812 WARN_ON(in_interrupt());
3813 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
3814 usleep_range(1000, 2000);
3815 i40e_down(vsi);
3816
3817 /* Give a VF some time to respond to the reset. The
3818 * two second wait is based upon the watchdog cycle in
3819 * the VF driver.
3820 */
3821 if (vsi->type == I40E_VSI_SRIOV)
3822 msleep(2000);
3823 i40e_up(vsi);
3824 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
3825}
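
/* The __I40E_CONFIG_BUSY bit above acts as a crude sleep-and-retry lock
 * around reconfiguration. A minimal sketch of the same pattern follows;
 * it assumes kernel/driver context (test_and_set_bit(), clear_bit() and
 * usleep_range()), and the work in the middle is only a placeholder.
 */
static void example_reconfig_locked(unsigned long *state)
{
	/* claim the busy bit, backing off 1-2 ms while someone else holds it */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, state))
		usleep_range(1000, 2000);

	/* ... bring the VSI down, change settings, bring it back up ... */

	clear_bit(__I40E_CONFIG_BUSY, state);	/* release */
}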
3826
3827/**
3828 * i40e_up - Bring the connection back up after being down
3829 * @vsi: the VSI being configured
3830 **/
3831int i40e_up(struct i40e_vsi *vsi)
3832{
3833 int err;
3834
3835 err = i40e_vsi_configure(vsi);
3836 if (!err)
3837 err = i40e_up_complete(vsi);
3838
3839 return err;
3840}
3841
3842/**
3843 * i40e_down - Shutdown the connection processing
3844 * @vsi: the VSI being stopped
3845 **/
3846void i40e_down(struct i40e_vsi *vsi)
3847{
3848 int i;
3849
3850 /* It is assumed that the caller of this function
3851 * sets the vsi->state __I40E_DOWN bit.
3852 */
3853 if (vsi->netdev) {
3854 netif_carrier_off(vsi->netdev);
3855 netif_tx_disable(vsi->netdev);
3856 }
3857 i40e_vsi_disable_irq(vsi);
3858 i40e_vsi_control_rings(vsi, false);
3859 i40e_napi_disable_all(vsi);
3860
3861 for (i = 0; i < vsi->num_queue_pairs; i++) {
3862 i40e_clean_tx_ring(vsi->tx_rings[i]);
3863 i40e_clean_rx_ring(vsi->rx_rings[i]);
3864 }
3865}
3866
3867/**
3868 * i40e_setup_tc - configure multiple traffic classes
3869 * @netdev: net device to configure
3870 * @tc: number of traffic classes to enable
3871 **/
3872static int i40e_setup_tc(struct net_device *netdev, u8 tc)
3873{
3874 struct i40e_netdev_priv *np = netdev_priv(netdev);
3875 struct i40e_vsi *vsi = np->vsi;
3876 struct i40e_pf *pf = vsi->back;
3877 u8 enabled_tc = 0;
3878 int ret = -EINVAL;
3879 int i;
3880
3881 /* Check if DCB enabled to continue */
3882 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
3883 netdev_info(netdev, "DCB is not enabled for adapter\n");
3884 goto exit;
3885 }
3886
3887 /* Check if MFP enabled */
3888 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3889 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
3890 goto exit;
3891 }
3892
3893 /* Check whether tc count is within enabled limit */
3894 if (tc > i40e_pf_get_num_tc(pf)) {
3895 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
3896 goto exit;
3897 }
3898
3899 /* Generate TC map for number of tc requested */
3900 for (i = 0; i < tc; i++)
3901 enabled_tc |= (1 << i);
3902
3903 /* Requesting same TC configuration as already enabled */
3904 if (enabled_tc == vsi->tc_config.enabled_tc)
3905 return 0;
3906
3907 /* Quiesce VSI queues */
3908 i40e_quiesce_vsi(vsi);
3909
3910 /* Configure VSI for enabled TCs */
3911 ret = i40e_vsi_config_tc(vsi, enabled_tc);
3912 if (ret) {
3913 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
3914 vsi->seid);
3915 goto exit;
3916 }
3917
3918 /* Unquiesce VSI */
3919 i40e_unquiesce_vsi(vsi);
3920
3921exit:
3922 return ret;
3923}
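
/* The loop above that turns the requested TC count into a bitmap is
 * equivalent to (1 << tc) - 1; a tiny standalone check, for illustration
 * only.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	int tc, i;

	for (tc = 0; tc <= 8; tc++) {
		unsigned int map = 0;

		for (i = 0; i < tc; i++)
			map |= (1 << i);
		assert(map == (1u << tc) - 1);	/* same contiguous bitmap */
	}
	printf("contiguous TC maps verified\n");
	return 0;
}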
3924
3925/**
3926 * i40e_open - Called when a network interface is made active
3927 * @netdev: network interface device structure
3928 *
3929 * The open entry point is called when a network interface is made
3930 * active by the system (IFF_UP). At this point all resources needed
3931 * for transmit and receive operations are allocated, the interrupt
3932 * handler is registered with the OS, the netdev watchdog subtask is
3933 * enabled, and the stack is notified that the interface is ready.
3934 *
3935 * Returns 0 on success, negative value on failure
3936 **/
3937static int i40e_open(struct net_device *netdev)
3938{
3939 struct i40e_netdev_priv *np = netdev_priv(netdev);
3940 struct i40e_vsi *vsi = np->vsi;
3941 struct i40e_pf *pf = vsi->back;
3942 char int_name[IFNAMSIZ];
3943 int err;
3944
3945 /* disallow open during test */
3946 if (test_bit(__I40E_TESTING, &pf->state))
3947 return -EBUSY;
3948
3949 netif_carrier_off(netdev);
3950
3951 /* allocate descriptors */
3952 err = i40e_vsi_setup_tx_resources(vsi);
3953 if (err)
3954 goto err_setup_tx;
3955 err = i40e_vsi_setup_rx_resources(vsi);
3956 if (err)
3957 goto err_setup_rx;
3958
3959 err = i40e_vsi_configure(vsi);
3960 if (err)
3961 goto err_setup_rx;
3962
3963 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3964 dev_driver_string(&pf->pdev->dev), netdev->name);
3965 err = i40e_vsi_request_irq(vsi, int_name);
3966 if (err)
3967 goto err_setup_rx;
3968
3969 err = i40e_up_complete(vsi);
3970 if (err)
3971 goto err_up_complete;
3972
3973 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
3974 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
3975 if (err)
3976 netdev_info(netdev,
3977 "couldn't set broadcast err %d aq_err %d\n",
3978 err, pf->hw.aq.asq_last_status);
3979 }
3980
3981 return 0;
3982
3983err_up_complete:
3984 i40e_down(vsi);
3985 i40e_vsi_free_irq(vsi);
3986err_setup_rx:
3987 i40e_vsi_free_rx_resources(vsi);
3988err_setup_tx:
3989 i40e_vsi_free_tx_resources(vsi);
3990 if (vsi == pf->vsi[pf->lan_vsi])
3991 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
3992
3993 return err;
3994}
3995
3996/**
3997 * i40e_close - Disables a network interface
3998 * @netdev: network interface device structure
3999 *
4000 * The close entry point is called when an interface is de-activated
4001 * by the OS. The hardware is still under the driver's control, but
4002 * this netdev interface is disabled.
4003 *
4004 * Returns 0, this is not allowed to fail
4005 **/
4006static int i40e_close(struct net_device *netdev)
4007{
4008 struct i40e_netdev_priv *np = netdev_priv(netdev);
4009 struct i40e_vsi *vsi = np->vsi;
4010
4011 if (test_and_set_bit(__I40E_DOWN, &vsi->state))
4012 return 0;
4013
4014 i40e_down(vsi);
4015 i40e_vsi_free_irq(vsi);
4016
4017 i40e_vsi_free_tx_resources(vsi);
4018 i40e_vsi_free_rx_resources(vsi);
4019
4020 return 0;
4021}
4022
4023/**
4024 * i40e_do_reset - Start a PF or Core Reset sequence
4025 * @pf: board private structure
4026 * @reset_flags: which reset is requested
4027 *
4028 * The essential difference in resets is that the PF Reset
4029 * doesn't clear the packet buffers, doesn't reset the PE
4030 * firmware, and doesn't bother the other PFs on the chip.
4031 **/
4032void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4033{
4034 u32 val;
4035
4036 WARN_ON(in_interrupt());
4037
4038 /* do the biggest reset indicated */
4039 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4040
4041 /* Request a Global Reset
4042 *
4043 * This will start the chip's countdown to the actual full
 4044 * chip reset event, and cause a warning interrupt to be sent
4045 * to all PFs, including the requestor. Our handler
4046 * for the warning interrupt will deal with the shutdown
4047 * and recovery of the switch setup.
4048 */
4049 dev_info(&pf->pdev->dev, "GlobalR requested\n");
4050 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4051 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4052 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4053
4054 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4055
4056 /* Request a Core Reset
4057 *
4058 * Same as Global Reset, except does *not* include the MAC/PHY
4059 */
4060 dev_info(&pf->pdev->dev, "CoreR requested\n");
4061 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4062 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4063 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4064 i40e_flush(&pf->hw);
4065
4066 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
4067
4068 /* Request a Firmware Reset
4069 *
4070 * Same as Global reset, plus restarting the
4071 * embedded firmware engine.
4072 */
4073 /* enable EMP Reset */
4074 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
4075 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
4076 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
4077
4078 /* force the reset */
4079 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4080 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
4081 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4082 i40e_flush(&pf->hw);
4083
4084 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4085
4086 /* Request a PF Reset
4087 *
4088 * Resets only the PF-specific registers
4089 *
4090 * This goes directly to the tear-down and rebuild of
4091 * the switch, since we need to do all the recovery as
4092 * for the Core Reset.
4093 */
4094 dev_info(&pf->pdev->dev, "PFR requested\n");
4095 i40e_handle_reset_warning(pf);
4096
4097 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4098 int v;
4099
4100 /* Find the VSI(s) that requested a re-init */
4101 dev_info(&pf->pdev->dev,
4102 "VSI reinit requested\n");
4103 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4104 struct i40e_vsi *vsi = pf->vsi[v];
4105 if (vsi != NULL &&
4106 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4107 i40e_vsi_reinit_locked(pf->vsi[v]);
4108 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4109 }
4110 }
4111
4112 /* no further action needed, so return now */
4113 return;
4114 } else {
4115 dev_info(&pf->pdev->dev,
4116 "bad reset request 0x%08x\n", reset_flags);
4117 return;
4118 }
4119}
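
/* For reference, a hedged sketch of the two ways callers elsewhere in this
 * file ask for one of these resets: call i40e_do_reset() with the request
 * bit directly, or set the matching pf->state bit and let
 * i40e_reset_subtask() pick it up from the service task. The example_*
 * wrappers are hypothetical, for illustration only.
 */
static void example_request_pf_reset(struct i40e_pf *pf)
{
	/* immediate: the same call i40e_open() makes on teardown failure */
	i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
}

static void example_defer_pf_reset(struct i40e_pf *pf)
{
	/* deferred: flag it and let the service task collect the bit */
	set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	i40e_service_event_schedule(pf);
}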
4120
4121/**
4122 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4123 * @pf: board private structure
4124 * @e: event info posted on ARQ
4125 *
4126 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4127 * and VF queues
4128 **/
4129static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4130 struct i40e_arq_event_info *e)
4131{
4132 struct i40e_aqc_lan_overflow *data =
4133 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4134 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4135 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4136 struct i40e_hw *hw = &pf->hw;
4137 struct i40e_vf *vf;
4138 u16 vf_id;
4139
4140 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
4141 __func__, queue, qtx_ctl);
4142
4143 /* Queue belongs to VF, find the VF and issue VF reset */
4144 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4145 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4146 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4147 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4148 vf_id -= hw->func_caps.vf_base_id;
4149 vf = &pf->vf[vf_id];
4150 i40e_vc_notify_vf_reset(vf);
4151 /* Allow VF to process pending reset notification */
4152 msleep(20);
4153 i40e_reset_vf(vf, false);
4154 }
4155}
4156
4157/**
4158 * i40e_service_event_complete - Finish up the service event
4159 * @pf: board private structure
4160 **/
4161static void i40e_service_event_complete(struct i40e_pf *pf)
4162{
4163 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4164
 4165 /* flush memory to make sure state is correct before next watchdog */
4166 smp_mb__before_clear_bit();
4167 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4168}
4169
4170/**
4171 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4172 * @pf: board private structure
4173 **/
4174static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4175{
4176 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4177 return;
4178
4179 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4180
4181 /* if interface is down do nothing */
4182 if (test_bit(__I40E_DOWN, &pf->state))
4183 return;
4184}
4185
4186/**
4187 * i40e_vsi_link_event - notify VSI of a link event
4188 * @vsi: vsi to be notified
4189 * @link_up: link up or down
4190 **/
4191static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4192{
4193 if (!vsi)
4194 return;
4195
4196 switch (vsi->type) {
4197 case I40E_VSI_MAIN:
4198 if (!vsi->netdev || !vsi->netdev_registered)
4199 break;
4200
4201 if (link_up) {
4202 netif_carrier_on(vsi->netdev);
4203 netif_tx_wake_all_queues(vsi->netdev);
4204 } else {
4205 netif_carrier_off(vsi->netdev);
4206 netif_tx_stop_all_queues(vsi->netdev);
4207 }
4208 break;
4209
4210 case I40E_VSI_SRIOV:
4211 break;
4212
4213 case I40E_VSI_VMDQ2:
4214 case I40E_VSI_CTRL:
4215 case I40E_VSI_MIRROR:
4216 default:
4217 /* there is no notification for other VSIs */
4218 break;
4219 }
4220}
4221
4222/**
4223 * i40e_veb_link_event - notify elements on the veb of a link event
4224 * @veb: veb to be notified
4225 * @link_up: link up or down
4226 **/
4227static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4228{
4229 struct i40e_pf *pf;
4230 int i;
4231
4232 if (!veb || !veb->pf)
4233 return;
4234 pf = veb->pf;
4235
4236 /* depth first... */
4237 for (i = 0; i < I40E_MAX_VEB; i++)
4238 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4239 i40e_veb_link_event(pf->veb[i], link_up);
4240
4241 /* ... now the local VSIs */
4242 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4243 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4244 i40e_vsi_link_event(pf->vsi[i], link_up);
4245}
4246
4247/**
4248 * i40e_link_event - Update netif_carrier status
4249 * @pf: board private structure
4250 **/
4251static void i40e_link_event(struct i40e_pf *pf)
4252{
4253 bool new_link, old_link;
4254
4255 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4256 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4257
4258 if (new_link == old_link)
4259 return;
4260
4261 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4262 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4263 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4264
4265 /* Notify the base of the switch tree connected to
4266 * the link. Floating VEBs are not notified.
4267 */
4268 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4269 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4270 else
4271 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4272
4273 if (pf->vf)
4274 i40e_vc_notify_link_state(pf);
4275}
4276
4277/**
4278 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
4279 * @pf: board private structure
4280 *
4281 * Set the per-queue flags to request a check for stuck queues in the irq
4282 * clean functions, then force interrupts to be sure the irq clean is called.
4283 **/
4284static void i40e_check_hang_subtask(struct i40e_pf *pf)
4285{
4286 int i, v;
4287
4288 /* If we're down or resetting, just bail */
4289 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4290 return;
4291
4292 /* for each VSI/netdev
4293 * for each Tx queue
4294 * set the check flag
4295 * for each q_vector
4296 * force an interrupt
4297 */
4298 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4299 struct i40e_vsi *vsi = pf->vsi[v];
4300 int armed = 0;
4301
4302 if (!pf->vsi[v] ||
4303 test_bit(__I40E_DOWN, &vsi->state) ||
4304 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4305 continue;
4306
4307 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 4308 set_check_for_tx_hang(vsi->tx_rings[i]);
41c445ff 4309 if (test_bit(__I40E_HANG_CHECK_ARMED,
9f65e15b 4310 &vsi->tx_rings[i]->state))
4311 armed++;
4312 }
4313
4314 if (armed) {
4315 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4316 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4317 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4318 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4319 } else {
4320 u16 vec = vsi->base_vector - 1;
4321 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4322 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4323 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4324 wr32(&vsi->back->hw,
4325 I40E_PFINT_DYN_CTLN(vec), val);
4326 }
4327 i40e_flush(&vsi->back->hw);
4328 }
4329 }
4330}
4331
4332/**
4333 * i40e_watchdog_subtask - Check and bring link up
4334 * @pf: board private structure
4335 **/
4336static void i40e_watchdog_subtask(struct i40e_pf *pf)
4337{
4338 int i;
4339
4340 /* if interface is down do nothing */
4341 if (test_bit(__I40E_DOWN, &pf->state) ||
4342 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4343 return;
4344
4345 /* Update the stats for active netdevs so the network stack
4346 * can look at updated numbers whenever it cares to
4347 */
4348 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4349 if (pf->vsi[i] && pf->vsi[i]->netdev)
4350 i40e_update_stats(pf->vsi[i]);
4351
4352 /* Update the stats for the active switching components */
4353 for (i = 0; i < I40E_MAX_VEB; i++)
4354 if (pf->veb[i])
4355 i40e_update_veb_stats(pf->veb[i]);
4356}
4357
4358/**
4359 * i40e_reset_subtask - Set up for resetting the device and driver
4360 * @pf: board private structure
4361 **/
4362static void i40e_reset_subtask(struct i40e_pf *pf)
4363{
4364 u32 reset_flags = 0;
4365
4366 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4367 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4368 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4369 }
4370 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4371 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4372 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4373 }
4374 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4375 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4376 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4377 }
4378 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4379 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4380 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4381 }
4382
4383 /* If there's a recovery already waiting, it takes
4384 * precedence before starting a new reset sequence.
4385 */
4386 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4387 i40e_handle_reset_warning(pf);
4388 return;
4389 }
4390
4391 /* If we're already down or resetting, just bail */
4392 if (reset_flags &&
4393 !test_bit(__I40E_DOWN, &pf->state) &&
4394 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4395 i40e_do_reset(pf, reset_flags);
4396}
4397
4398/**
4399 * i40e_handle_link_event - Handle link event
4400 * @pf: board private structure
4401 * @e: event info posted on ARQ
4402 **/
4403static void i40e_handle_link_event(struct i40e_pf *pf,
4404 struct i40e_arq_event_info *e)
4405{
4406 struct i40e_hw *hw = &pf->hw;
4407 struct i40e_aqc_get_link_status *status =
4408 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4409 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4410
4411 /* save off old link status information */
4412 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4413 sizeof(pf->hw.phy.link_info_old));
4414
4415 /* update link status */
4416 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
4417 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
4418 hw_link_info->link_info = status->link_info;
4419 hw_link_info->an_info = status->an_info;
4420 hw_link_info->ext_info = status->ext_info;
4421 hw_link_info->lse_enable =
4422 le16_to_cpu(status->command_flags) &
4423 I40E_AQ_LSE_ENABLE;
4424
4425 /* process the event */
4426 i40e_link_event(pf);
4427
4428 /* Do a new status request to re-enable LSE reporting
4429 * and load new status information into the hw struct,
4430 * then see if the status changed while processing the
4431 * initial event.
4432 */
4433 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
4434 i40e_link_event(pf);
4435}
4436
4437/**
4438 * i40e_clean_adminq_subtask - Clean the AdminQ rings
4439 * @pf: board private structure
4440 **/
4441static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4442{
4443 struct i40e_arq_event_info event;
4444 struct i40e_hw *hw = &pf->hw;
4445 u16 pending, i = 0;
4446 i40e_status ret;
4447 u16 opcode;
4448 u32 val;
4449
4450 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
4451 return;
4452
4453 event.msg_size = I40E_MAX_AQ_BUF_SIZE;
4454 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
4455 if (!event.msg_buf)
4456 return;
4457
4458 do {
4459 ret = i40e_clean_arq_element(hw, &event, &pending);
4460 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
4461 dev_info(&pf->pdev->dev, "No ARQ event found\n");
4462 break;
4463 } else if (ret) {
4464 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
4465 break;
4466 }
4467
4468 opcode = le16_to_cpu(event.desc.opcode);
4469 switch (opcode) {
4470
4471 case i40e_aqc_opc_get_link_status:
4472 i40e_handle_link_event(pf, &event);
4473 break;
4474 case i40e_aqc_opc_send_msg_to_pf:
4475 ret = i40e_vc_process_vf_msg(pf,
4476 le16_to_cpu(event.desc.retval),
4477 le32_to_cpu(event.desc.cookie_high),
4478 le32_to_cpu(event.desc.cookie_low),
4479 event.msg_buf,
4480 event.msg_size);
4481 break;
4482 case i40e_aqc_opc_lldp_update_mib:
4483 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4484 break;
4485 case i40e_aqc_opc_event_lan_overflow:
4486 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4487 i40e_handle_lan_overflow_event(pf, &event);
4488 break;
4489 default:
4490 dev_info(&pf->pdev->dev,
4491 "ARQ Error: Unknown event %d received\n",
4492 event.desc.opcode);
4493 break;
4494 }
4495 } while (pending && (i++ < pf->adminq_work_limit));
4496
4497 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
4498 /* re-enable Admin queue interrupt cause */
4499 val = rd32(hw, I40E_PFINT_ICR0_ENA);
4500 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4501 wr32(hw, I40E_PFINT_ICR0_ENA, val);
4502 i40e_flush(hw);
4503
4504 kfree(event.msg_buf);
4505}
4506
4507/**
4508 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
4509 * @veb: pointer to the VEB instance
4510 *
 4511 * This is a recursive function that first builds the attached VSIs, then
 4512 * recurses to build the next layer of VEBs. We track the connections
 4513 * through our own index numbers because the SEIDs from the HW could
4514 * change across the reset.
4515 **/
4516static int i40e_reconstitute_veb(struct i40e_veb *veb)
4517{
4518 struct i40e_vsi *ctl_vsi = NULL;
4519 struct i40e_pf *pf = veb->pf;
4520 int v, veb_idx;
4521 int ret;
4522
4523 /* build VSI that owns this VEB, temporarily attached to base VEB */
4524 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
4525 if (pf->vsi[v] &&
4526 pf->vsi[v]->veb_idx == veb->idx &&
4527 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
4528 ctl_vsi = pf->vsi[v];
4529 break;
4530 }
4531 }
4532 if (!ctl_vsi) {
4533 dev_info(&pf->pdev->dev,
4534 "missing owner VSI for veb_idx %d\n", veb->idx);
4535 ret = -ENOENT;
4536 goto end_reconstitute;
4537 }
4538 if (ctl_vsi != pf->vsi[pf->lan_vsi])
4539 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
4540 ret = i40e_add_vsi(ctl_vsi);
4541 if (ret) {
4542 dev_info(&pf->pdev->dev,
4543 "rebuild of owner VSI failed: %d\n", ret);
4544 goto end_reconstitute;
4545 }
4546 i40e_vsi_reset_stats(ctl_vsi);
4547
4548 /* create the VEB in the switch and move the VSI onto the VEB */
4549 ret = i40e_add_veb(veb, ctl_vsi);
4550 if (ret)
4551 goto end_reconstitute;
4552
4553 /* create the remaining VSIs attached to this VEB */
4554 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4555 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
4556 continue;
4557
4558 if (pf->vsi[v]->veb_idx == veb->idx) {
4559 struct i40e_vsi *vsi = pf->vsi[v];
4560 vsi->uplink_seid = veb->seid;
4561 ret = i40e_add_vsi(vsi);
4562 if (ret) {
4563 dev_info(&pf->pdev->dev,
4564 "rebuild of vsi_idx %d failed: %d\n",
4565 v, ret);
4566 goto end_reconstitute;
4567 }
4568 i40e_vsi_reset_stats(vsi);
4569 }
4570 }
4571
4572 /* create any VEBs attached to this VEB - RECURSION */
4573 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
4574 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
4575 pf->veb[veb_idx]->uplink_seid = veb->seid;
4576 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
4577 if (ret)
4578 break;
4579 }
4580 }
4581
4582end_reconstitute:
4583 return ret;
4584}
4585
4586/**
4587 * i40e_get_capabilities - get info about the HW
4588 * @pf: the PF struct
4589 **/
4590static int i40e_get_capabilities(struct i40e_pf *pf)
4591{
4592 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
4593 u16 data_size;
4594 int buf_len;
4595 int err;
4596
4597 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
4598 do {
4599 cap_buf = kzalloc(buf_len, GFP_KERNEL);
4600 if (!cap_buf)
4601 return -ENOMEM;
4602
4603 /* this loads the data into the hw struct for us */
4604 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
4605 &data_size,
4606 i40e_aqc_opc_list_func_capabilities,
4607 NULL);
4608 /* data loaded, buffer no longer needed */
4609 kfree(cap_buf);
4610
4611 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
4612 /* retry with a larger buffer */
4613 buf_len = data_size;
4614 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
4615 dev_info(&pf->pdev->dev,
4616 "capability discovery failed: aq=%d\n",
4617 pf->hw.aq.asq_last_status);
4618 return -ENODEV;
4619 }
4620 } while (err);
4621
4622 if (pf->hw.revision_id == 0 && pf->hw.func_caps.npar_enable) {
4623 pf->hw.func_caps.num_msix_vectors += 1;
4624 pf->hw.func_caps.num_tx_qp =
4625 min_t(int, pf->hw.func_caps.num_tx_qp,
4626 I40E_MAX_NPAR_QPS);
4627 }
4628
4629 if (pf->hw.debug_mask & I40E_DEBUG_USER)
4630 dev_info(&pf->pdev->dev,
4631 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
4632 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
4633 pf->hw.func_caps.num_msix_vectors,
4634 pf->hw.func_caps.num_msix_vectors_vf,
4635 pf->hw.func_caps.fd_filters_guaranteed,
4636 pf->hw.func_caps.fd_filters_best_effort,
4637 pf->hw.func_caps.num_tx_qp,
4638 pf->hw.func_caps.num_vsis);
4639
4640#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
4641 + pf->hw.func_caps.num_vfs)
4642 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
4643 dev_info(&pf->pdev->dev,
4644 "got num_vsis %d, setting num_vsis to %d\n",
4645 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
4646 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
4647 }
4648
4649 return 0;
4650}
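
/* The capability query above uses a grow-and-retry idiom: when the firmware
 * reports ENOMEM it also reports the size it wanted, and the loop reallocates
 * and tries again. Below is a standalone sketch of the same idiom against a
 * mock query function (hypothetical, not the real AdminQ API).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mock responder: fails and reports the needed size when the buffer is
 * too small, copies the data and returns 0 otherwise.
 */
static int mock_query(char *buf, size_t len, size_t *needed)
{
	const char data[] = "capability data blob";

	*needed = sizeof(data);
	if (len < sizeof(data))
		return -1;
	memcpy(buf, data, sizeof(data));
	return 0;
}

int main(void)
{
	size_t len = 8, needed = 0;
	char *buf = NULL;

	do {
		free(buf);
		buf = malloc(len);
		if (!buf)
			return 1;
		if (mock_query(buf, len, &needed) == 0)
			break;
		len = needed;	/* retry with the size the responder asked for */
	} while (1);

	printf("got \"%s\" in a %zu byte buffer\n", buf, len);
	free(buf);
	return 0;
}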
4651
4652/**
4653 * i40e_fdir_setup - initialize the Flow Director resources
4654 * @pf: board private structure
4655 **/
4656static void i40e_fdir_setup(struct i40e_pf *pf)
4657{
4658 struct i40e_vsi *vsi;
4659 bool new_vsi = false;
4660 int err, i;
4661
4662 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
4663 I40E_FLAG_FDIR_ATR_ENABLED)))
4664 return;
4665
4666 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
4667
4668 /* find existing or make new FDIR VSI */
4669 vsi = NULL;
4670 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4671 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
4672 vsi = pf->vsi[i];
4673 if (!vsi) {
4674 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
4675 if (!vsi) {
4676 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
4677 pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
4678 return;
4679 }
4680 new_vsi = true;
4681 }
4682 WARN_ON(vsi->base_queue != I40E_FDIR_RING);
4683 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
4684
4685 err = i40e_vsi_setup_tx_resources(vsi);
4686 if (!err)
4687 err = i40e_vsi_setup_rx_resources(vsi);
4688 if (!err)
4689 err = i40e_vsi_configure(vsi);
4690 if (!err && new_vsi) {
4691 char int_name[IFNAMSIZ + 9];
4692 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4693 dev_driver_string(&pf->pdev->dev));
4694 err = i40e_vsi_request_irq(vsi, int_name);
4695 }
4696 if (!err)
4697 err = i40e_up_complete(vsi);
4698
4699 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4700}
4701
4702/**
4703 * i40e_fdir_teardown - release the Flow Director resources
4704 * @pf: board private structure
4705 **/
4706static void i40e_fdir_teardown(struct i40e_pf *pf)
4707{
4708 int i;
4709
4710 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
4711 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
4712 i40e_vsi_release(pf->vsi[i]);
4713 break;
4714 }
4715 }
4716}
4717
4718/**
f650a38b 4719 * i40e_prep_for_reset - prep for the core to reset
4720 * @pf: board private structure
4721 *
 4722 * Close up the VFs and other things in prep for a PF reset.
4723 **/
4724static int i40e_prep_for_reset(struct i40e_pf *pf)
41c445ff 4725{
4726 struct i40e_hw *hw = &pf->hw;
4727 i40e_status ret;
4728 u32 v;
4729
4730 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
4731 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
f650a38b 4732 return 0;
4733
4734 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
4735
4736 i40e_vc_notify_reset(pf);
4737
4738 /* quiesce the VSIs and their queues that are not already DOWN */
4739 i40e_pf_quiesce_all_vsi(pf);
4740
4741 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4742 if (pf->vsi[v])
4743 pf->vsi[v]->seid = 0;
4744 }
4745
4746 i40e_shutdown_adminq(&pf->hw);
4747
4748 /* call shutdown HMC */
4749 ret = i40e_shutdown_lan_hmc(hw);
4750 if (ret) {
4751 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
4752 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4753 }
4754 return ret;
4755}
4756
4757/**
 4758 * i40e_reset_and_rebuild - reset and rebuild using a saved config
4759 * @pf: board private structure
4760 **/
4761static void i40e_reset_and_rebuild(struct i40e_pf *pf)
4762{
4763 struct i40e_driver_version dv;
4764 struct i40e_hw *hw = &pf->hw;
4765 i40e_status ret;
4766 u32 v;
4767
4768 /* Now we wait for GRST to settle out.
4769 * We don't have to delete the VEBs or VSIs from the hw switch
4770 * because the reset will make them disappear.
4771 */
4772 ret = i40e_pf_reset(hw);
4773 if (ret)
4774 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
4775 pf->pfr_count++;
4776
4777 if (test_bit(__I40E_DOWN, &pf->state))
4778 goto end_core_reset;
4779 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
4780
4781 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
4782 ret = i40e_init_adminq(&pf->hw);
4783 if (ret) {
4784 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
4785 goto end_core_reset;
4786 }
4787
4788 ret = i40e_get_capabilities(pf);
4789 if (ret) {
4790 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
4791 ret);
4792 goto end_core_reset;
4793 }
4794
4795 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4796 hw->func_caps.num_rx_qp,
4797 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
4798 if (ret) {
4799 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
4800 goto end_core_reset;
4801 }
4802 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4803 if (ret) {
4804 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
4805 goto end_core_reset;
4806 }
4807
4808 /* do basic switch setup */
4809 ret = i40e_setup_pf_switch(pf);
4810 if (ret)
4811 goto end_core_reset;
4812
4813 /* Rebuild the VSIs and VEBs that existed before reset.
4814 * They are still in our local switch element arrays, so only
4815 * need to rebuild the switch model in the HW.
4816 *
 4817 * If there were VEBs but the reconstitution failed, we'll try
 4818 * to recover minimal use by getting the basic PF VSI working.
4819 */
4820 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
4821 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
4822 /* find the one VEB connected to the MAC, and find orphans */
4823 for (v = 0; v < I40E_MAX_VEB; v++) {
4824 if (!pf->veb[v])
4825 continue;
4826
4827 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
4828 pf->veb[v]->uplink_seid == 0) {
4829 ret = i40e_reconstitute_veb(pf->veb[v]);
4830
4831 if (!ret)
4832 continue;
4833
4834 /* If Main VEB failed, we're in deep doodoo,
4835 * so give up rebuilding the switch and set up
4836 * for minimal rebuild of PF VSI.
4837 * If orphan failed, we'll report the error
4838 * but try to keep going.
4839 */
4840 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
4841 dev_info(&pf->pdev->dev,
4842 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
4843 ret);
4844 pf->vsi[pf->lan_vsi]->uplink_seid
4845 = pf->mac_seid;
4846 break;
4847 } else if (pf->veb[v]->uplink_seid == 0) {
4848 dev_info(&pf->pdev->dev,
4849 "rebuild of orphan VEB failed: %d\n",
4850 ret);
4851 }
4852 }
4853 }
4854 }
4855
4856 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
4857 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
4858 /* no VEB, so rebuild only the Main VSI */
4859 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
4860 if (ret) {
4861 dev_info(&pf->pdev->dev,
4862 "rebuild of Main VSI failed: %d\n", ret);
4863 goto end_core_reset;
4864 }
4865 }
4866
4867 /* reinit the misc interrupt */
4868 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4869 ret = i40e_setup_misc_vector(pf);
4870
4871 /* restart the VSIs that were rebuilt and running before the reset */
4872 i40e_pf_unquiesce_all_vsi(pf);
4873
4874 /* tell the firmware that we're starting */
4875 dv.major_version = DRV_VERSION_MAJOR;
4876 dv.minor_version = DRV_VERSION_MINOR;
4877 dv.build_version = DRV_VERSION_BUILD;
4878 dv.subbuild_version = 0;
4879 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
4880
4881 dev_info(&pf->pdev->dev, "PF reset done\n");
4882
4883end_core_reset:
4884 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4885}
4886
4887/**
4888 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
4889 * @pf: board private structure
4890 *
4891 * Close up the VFs and other things in prep for a Core Reset,
4892 * then get ready to rebuild the world.
4893 **/
4894static void i40e_handle_reset_warning(struct i40e_pf *pf)
4895{
4896 i40e_status ret;
4897
4898 ret = i40e_prep_for_reset(pf);
4899 if (!ret)
4900 i40e_reset_and_rebuild(pf);
4901}
4902
4903/**
4904 * i40e_handle_mdd_event
4905 * @pf: pointer to the pf structure
4906 *
4907 * Called from the MDD irq handler to identify possibly malicious vfs
4908 **/
4909static void i40e_handle_mdd_event(struct i40e_pf *pf)
4910{
4911 struct i40e_hw *hw = &pf->hw;
4912 bool mdd_detected = false;
4913 struct i40e_vf *vf;
4914 u32 reg;
4915 int i;
4916
4917 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
4918 return;
4919
4920 /* find what triggered the MDD event */
4921 reg = rd32(hw, I40E_GL_MDET_TX);
4922 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4923 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
4924 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
4925 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
4926 >> I40E_GL_MDET_TX_EVENT_SHIFT;
4927 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
4928 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
4929 dev_info(&pf->pdev->dev,
4930 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
4931 event, queue, func);
4932 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4933 mdd_detected = true;
4934 }
4935 reg = rd32(hw, I40E_GL_MDET_RX);
4936 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4937 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
4938 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
4939 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
4940 >> I40E_GL_MDET_RX_EVENT_SHIFT;
4941 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
4942 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
4943 dev_info(&pf->pdev->dev,
4944 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
4945 event, queue, func);
4946 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4947 mdd_detected = true;
4948 }
4949
4950 /* see if one of the VFs needs its hand slapped */
4951 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
4952 vf = &(pf->vf[i]);
4953 reg = rd32(hw, I40E_VP_MDET_TX(i));
4954 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
4955 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
4956 vf->num_mdd_events++;
4957 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
4958 }
4959
4960 reg = rd32(hw, I40E_VP_MDET_RX(i));
4961 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
4962 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
4963 vf->num_mdd_events++;
4964 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
4965 }
4966
4967 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
4968 dev_info(&pf->pdev->dev,
4969 "Too many MDD events on VF %d, disabled\n", i);
4970 dev_info(&pf->pdev->dev,
4971 "Use PF Control I/F to re-enable the VF\n");
4972 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
4973 }
4974 }
4975
4976 /* re-enable mdd interrupt cause */
4977 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
4978 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4979 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
4980 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4981 i40e_flush(hw);
4982}
4983
4984/**
4985 * i40e_service_task - Run the driver's async subtasks
4986 * @work: pointer to work_struct containing our data
4987 **/
4988static void i40e_service_task(struct work_struct *work)
4989{
4990 struct i40e_pf *pf = container_of(work,
4991 struct i40e_pf,
4992 service_task);
4993 unsigned long start_time = jiffies;
4994
4995 i40e_reset_subtask(pf);
4996 i40e_handle_mdd_event(pf);
4997 i40e_vc_process_vflr_event(pf);
4998 i40e_watchdog_subtask(pf);
4999 i40e_fdir_reinit_subtask(pf);
5000 i40e_check_hang_subtask(pf);
5001 i40e_sync_filters_subtask(pf);
5002 i40e_clean_adminq_subtask(pf);
5003
5004 i40e_service_event_complete(pf);
5005
5006 /* If the tasks have taken longer than one timer cycle or there
5007 * is more work to be done, reschedule the service task now
5008 * rather than wait for the timer to tick again.
5009 */
5010 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5011 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
5012 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
5013 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5014 i40e_service_event_schedule(pf);
5015}
5016
5017/**
5018 * i40e_service_timer - timer callback
5019 * @data: pointer to PF struct
5020 **/
5021static void i40e_service_timer(unsigned long data)
5022{
5023 struct i40e_pf *pf = (struct i40e_pf *)data;
5024
5025 mod_timer(&pf->service_timer,
5026 round_jiffies(jiffies + pf->service_timer_period));
5027 i40e_service_event_schedule(pf);
5028}
5029
5030/**
5031 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5032 * @vsi: the VSI being configured
5033 **/
5034static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5035{
5036 struct i40e_pf *pf = vsi->back;
5037
5038 switch (vsi->type) {
5039 case I40E_VSI_MAIN:
5040 vsi->alloc_queue_pairs = pf->num_lan_qps;
5041 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5042 I40E_REQ_DESCRIPTOR_MULTIPLE);
5043 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5044 vsi->num_q_vectors = pf->num_lan_msix;
5045 else
5046 vsi->num_q_vectors = 1;
5047
5048 break;
5049
5050 case I40E_VSI_FDIR:
5051 vsi->alloc_queue_pairs = 1;
5052 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5053 I40E_REQ_DESCRIPTOR_MULTIPLE);
5054 vsi->num_q_vectors = 1;
5055 break;
5056
5057 case I40E_VSI_VMDQ2:
5058 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5059 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5060 I40E_REQ_DESCRIPTOR_MULTIPLE);
5061 vsi->num_q_vectors = pf->num_vmdq_msix;
5062 break;
5063
5064 case I40E_VSI_SRIOV:
5065 vsi->alloc_queue_pairs = pf->num_vf_qps;
5066 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5067 I40E_REQ_DESCRIPTOR_MULTIPLE);
5068 break;
5069
5070 default:
5071 WARN_ON(1);
5072 return -ENODATA;
5073 }
5074
5075 return 0;
5076}
5077
5078/**
 5079 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 5080 * @vsi: pointer to the VSI
5081 *
5082 * On error: returns error code (negative)
5083 * On success: returns 0
5084 **/
5085static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi)
5086{
5087 int size;
5088 int ret = 0;
5089
5090 /* allocate memory for ring pointers */
5091 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5092 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5093 if (!vsi->tx_rings)
5094 return -ENOMEM;
5095
5096 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5097
5098 /* allocate memory for q_vector pointers */
5099 size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
5100 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5101 if (!vsi->q_vectors) {
5102 ret = -ENOMEM;
5103 goto err_vectors;
5104 }
5105 return ret;
5106
5107err_vectors:
5108 kfree(vsi->tx_rings);
5109 return ret;
5110}
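
/* Note that rx_rings above is not a second allocation: the single kzalloc
 * holds both pointer arrays back to back and rx_rings simply aliases the
 * second half, which is why the error path (and i40e_vsi_free_arrays()
 * below) frees only tx_rings. A standalone sketch of the trick, with
 * illustrative names only.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };

int main(void)
{
	int nqp = 4;	/* queue pairs */
	struct ring **tx, **rx;

	tx = calloc(2 * nqp, sizeof(*tx));	/* one block, two arrays */
	if (!tx)
		return 1;
	rx = &tx[nqp];	/* rx array starts right after the tx array */

	printf("rx begins %td pointers after tx\n", rx - tx);	/* prints 4 */
	free(tx);	/* a single free releases both arrays */
	return 0;
}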
5111
5112/**
5113 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
5114 * @pf: board private structure
5115 * @type: type of VSI
5116 *
5117 * On error: returns error code (negative)
5118 * On success: returns vsi index in PF (positive)
5119 **/
5120static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5121{
5122 int ret = -ENODEV;
5123 struct i40e_vsi *vsi;
5124 int vsi_idx;
5125 int i;
5126
5127 /* Need to protect the allocation of the VSIs at the PF level */
5128 mutex_lock(&pf->switch_mutex);
5129
5130 /* VSI list may be fragmented if VSI creation/destruction has
5131 * been happening. We can afford to do a quick scan to look
5132 * for any free VSIs in the list.
5133 *
5134 * find next empty vsi slot, looping back around if necessary
5135 */
5136 i = pf->next_vsi;
5137 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5138 i++;
5139 if (i >= pf->hw.func_caps.num_vsis) {
5140 i = 0;
5141 while (i < pf->next_vsi && pf->vsi[i])
5142 i++;
5143 }
5144
5145 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5146 vsi_idx = i; /* Found one! */
5147 } else {
5148 ret = -ENODEV;
493fb300 5149 goto unlock_pf; /* out of VSI slots! */
5150 }
5151 pf->next_vsi = ++i;
5152
5153 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5154 if (!vsi) {
5155 ret = -ENOMEM;
493fb300 5156 goto unlock_pf;
5157 }
5158 vsi->type = type;
5159 vsi->back = pf;
5160 set_bit(__I40E_DOWN, &vsi->state);
5161 vsi->flags = 0;
5162 vsi->idx = vsi_idx;
5163 vsi->rx_itr_setting = pf->rx_itr_default;
5164 vsi->tx_itr_setting = pf->tx_itr_default;
5165 vsi->netdev_registered = false;
5166 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5167 INIT_LIST_HEAD(&vsi->mac_filter_list);
5168
5169 ret = i40e_set_num_rings_in_vsi(vsi);
5170 if (ret)
5171 goto err_rings;
5172
5173 ret = i40e_vsi_alloc_arrays(vsi);
5174 if (ret)
9f65e15b 5175 goto err_rings;
493fb300 5176
5177 /* Setup default MSIX irq handler for VSI */
5178 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5179
5180 pf->vsi[vsi_idx] = vsi;
5181 ret = vsi_idx;
5182 goto unlock_pf;
5183
9f65e15b 5184err_rings:
5185 pf->next_vsi = i - 1;
5186 kfree(vsi);
5187unlock_pf:
5188 mutex_unlock(&pf->switch_mutex);
5189 return ret;
5190}
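
/* The slot search above scans forward from pf->next_vsi and wraps around
 * once before giving up. A minimal standalone sketch of that wrap-around
 * scan (illustrative only, not driver code).
 */
#include <stdio.h>

/* Returns a free (NULL) slot index starting at *next, wrapping once,
 * or -1 if the table is full; on success *next is advanced past it.
 */
static int find_free_slot(void **table, int num, int *next)
{
	int i = *next;

	while (i < num && table[i])
		i++;
	if (i >= num) {
		i = 0;
		while (i < *next && table[i])
			i++;
	}
	if (i < num && !table[i]) {
		*next = i + 1;
		return i;
	}
	return -1;
}

int main(void)
{
	int used = 1;
	void *table[4] = { &used, NULL, &used, NULL };
	int next = 2;

	printf("slot %d\n", find_free_slot(table, 4, &next));	/* slot 3 */
	return 0;
}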
5191
5192/**
5193 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 5194 * @vsi: pointer to the VSI whose queue and vector pointer arrays are freed
5198 **/
5199static void i40e_vsi_free_arrays(struct i40e_vsi *vsi)
5200{
5201 /* free the ring and vector containers */
5202 kfree(vsi->q_vectors);
5203 vsi->q_vectors = NULL;
5204 kfree(vsi->tx_rings);
5205 vsi->tx_rings = NULL;
5206 vsi->rx_rings = NULL;
5207}
5208
5209/**
5210 * i40e_vsi_clear - Deallocate the VSI provided
5211 * @vsi: the VSI being un-configured
5212 **/
5213static int i40e_vsi_clear(struct i40e_vsi *vsi)
5214{
5215 struct i40e_pf *pf;
5216
5217 if (!vsi)
5218 return 0;
5219
5220 if (!vsi->back)
5221 goto free_vsi;
5222 pf = vsi->back;
5223
5224 mutex_lock(&pf->switch_mutex);
5225 if (!pf->vsi[vsi->idx]) {
5226 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5227 vsi->idx, vsi->idx, vsi, vsi->type);
5228 goto unlock_vsi;
5229 }
5230
5231 if (pf->vsi[vsi->idx] != vsi) {
5232 dev_err(&pf->pdev->dev,
5233 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5234 pf->vsi[vsi->idx]->idx,
5235 pf->vsi[vsi->idx],
5236 pf->vsi[vsi->idx]->type,
5237 vsi->idx, vsi, vsi->type);
5238 goto unlock_vsi;
5239 }
5240
5241 /* updates the pf for this cleared vsi */
5242 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5243 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5244
f650a38b 5245 i40e_vsi_free_arrays(vsi);
493fb300 5246
5247 pf->vsi[vsi->idx] = NULL;
5248 if (vsi->idx < pf->next_vsi)
5249 pf->next_vsi = vsi->idx;
5250
5251unlock_vsi:
5252 mutex_unlock(&pf->switch_mutex);
5253free_vsi:
5254 kfree(vsi);
5255
5256 return 0;
5257}
5258
5259/**
5260 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5261 * @vsi: the VSI being cleaned
5262 **/
5263static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5264{
5265 int i;
5266
5267 if (vsi->tx_rings[0])
5268 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5269 kfree_rcu(vsi->tx_rings[i], rcu);
5270 vsi->tx_rings[i] = NULL;
5271 vsi->rx_rings[i] = NULL;
5272 }
5273
5274 return 0;
5275}
5276
5277/**
5278 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5279 * @vsi: the VSI being configured
5280 **/
5281static int i40e_alloc_rings(struct i40e_vsi *vsi)
5282{
5283 struct i40e_pf *pf = vsi->back;
5284 int i;
5285
5286 /* Set basic values in the rings to be used later during open() */
5287 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
5288 struct i40e_ring *tx_ring;
5289 struct i40e_ring *rx_ring;
5290
5291 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5292 if (!tx_ring)
5293 goto err_out;
5294
5295 tx_ring->queue_index = i;
5296 tx_ring->reg_idx = vsi->base_queue + i;
5297 tx_ring->ring_active = false;
5298 tx_ring->vsi = vsi;
5299 tx_ring->netdev = vsi->netdev;
5300 tx_ring->dev = &pf->pdev->dev;
5301 tx_ring->count = vsi->num_desc;
5302 tx_ring->size = 0;
5303 tx_ring->dcb_tc = 0;
9f65e15b 5304 vsi->tx_rings[i] = tx_ring;
41c445ff 5305
9f65e15b 5306 rx_ring = &tx_ring[1];
5307 rx_ring->queue_index = i;
5308 rx_ring->reg_idx = vsi->base_queue + i;
5309 rx_ring->ring_active = false;
5310 rx_ring->vsi = vsi;
5311 rx_ring->netdev = vsi->netdev;
5312 rx_ring->dev = &pf->pdev->dev;
5313 rx_ring->count = vsi->num_desc;
5314 rx_ring->size = 0;
5315 rx_ring->dcb_tc = 0;
5316 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5317 set_ring_16byte_desc_enabled(rx_ring);
5318 else
5319 clear_ring_16byte_desc_enabled(rx_ring);
9f65e15b 5320 vsi->rx_rings[i] = rx_ring;
5321 }
5322
5323 return 0;
5324
5325err_out:
5326 i40e_vsi_clear_rings(vsi);
5327 return -ENOMEM;
5328}
5329
5330/**
5331 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
5332 * @pf: board private structure
5333 * @vectors: the number of MSI-X vectors to request
5334 *
5335 * Returns the number of vectors reserved, or error
5336 **/
5337static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5338{
5339 int err = 0;
5340
5341 pf->num_msix_entries = 0;
5342 while (vectors >= I40E_MIN_MSIX) {
5343 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5344 if (err == 0) {
5345 /* good to go */
5346 pf->num_msix_entries = vectors;
5347 break;
5348 } else if (err < 0) {
5349 /* total failure */
5350 dev_info(&pf->pdev->dev,
5351 "MSI-X vector reservation failed: %d\n", err);
5352 vectors = 0;
5353 break;
5354 } else {
5355 /* err > 0 is the hint for retry */
5356 dev_info(&pf->pdev->dev,
5357 "MSI-X vectors wanted %d, retrying with %d\n",
5358 vectors, err);
5359 vectors = err;
5360 }
5361 }
5362
5363 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5364 dev_info(&pf->pdev->dev,
5365 "Couldn't get enough vectors, only %d available\n",
5366 vectors);
5367 vectors = 0;
5368 }
5369
5370 return vectors;
5371}
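
/* The retry contract above comes from the old pci_enable_msix() interface:
 * zero means success, a negative value is a hard failure, and a positive
 * value is a hint for how many vectors could actually be granted. Below is
 * a standalone sketch of that loop against a mock enable function
 * (hypothetical, not the real PCI API).
 */
#include <stdio.h>

/* Mock with the same contract: 0 = success, >0 = how many are available */
static int mock_enable_msix(int requested)
{
	const int available = 10;

	return requested <= available ? 0 : available;
}

int main(void)
{
	int want = 24, min = 2;

	while (want >= min) {
		int err = mock_enable_msix(want);

		if (err == 0) {
			printf("reserved %d vectors\n", want);	/* prints 10 */
			break;
		} else if (err < 0) {
			want = 0;	/* hard failure */
			break;
		}
		want = err;	/* retry with the hinted count */
	}
	return 0;
}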
5372
5373/**
5374 * i40e_init_msix - Setup the MSIX capability
5375 * @pf: board private structure
5376 *
5377 * Work with the OS to set up the MSIX vectors needed.
5378 *
5379 * Returns 0 on success, negative on failure
5380 **/
5381static int i40e_init_msix(struct i40e_pf *pf)
5382{
5383 i40e_status err = 0;
5384 struct i40e_hw *hw = &pf->hw;
5385 int v_budget, i;
5386 int vec;
5387
5388 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
5389 return -ENODEV;
5390
 5391 /* The number of vectors we'll request is made up of:
5392 * - Add 1 for "other" cause for Admin Queue events, etc.
5393 * - The number of LAN queue pairs
5394 * already adjusted for the NUMA node
5395 * assumes symmetric Tx/Rx pairing
5396 * - The number of VMDq pairs
5397 * Once we count this up, try the request.
5398 *
5399 * If we can't get what we want, we'll simplify to nearly nothing
5400 * and try again. If that still fails, we punt.
5401 */
5402 pf->num_lan_msix = pf->num_lan_qps;
5403 pf->num_vmdq_msix = pf->num_vmdq_qps;
5404 v_budget = 1 + pf->num_lan_msix;
5405 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
5406 if (pf->flags & I40E_FLAG_FDIR_ENABLED)
5407 v_budget++;
5408
5409 /* Scale down if necessary, and the rings will share vectors */
5410 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
5411
5412 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
5413 GFP_KERNEL);
5414 if (!pf->msix_entries)
5415 return -ENOMEM;
5416
5417 for (i = 0; i < v_budget; i++)
5418 pf->msix_entries[i].entry = i;
5419 vec = i40e_reserve_msix_vectors(pf, v_budget);
5420 if (vec < I40E_MIN_MSIX) {
5421 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
5422 kfree(pf->msix_entries);
5423 pf->msix_entries = NULL;
5424 return -ENODEV;
5425
5426 } else if (vec == I40E_MIN_MSIX) {
5427 /* Adjust for minimal MSIX use */
5428 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
5429 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5430 pf->num_vmdq_vsis = 0;
5431 pf->num_vmdq_qps = 0;
5432 pf->num_vmdq_msix = 0;
5433 pf->num_lan_qps = 1;
5434 pf->num_lan_msix = 1;
5435
5436 } else if (vec != v_budget) {
5437 /* Scale vector usage down */
5438 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
5439 vec--; /* reserve the misc vector */
5440
5441 /* partition out the remaining vectors */
5442 switch (vec) {
5443 case 2:
5444 pf->num_vmdq_vsis = 1;
5445 pf->num_lan_msix = 1;
5446 break;
5447 case 3:
5448 pf->num_vmdq_vsis = 1;
5449 pf->num_lan_msix = 2;
5450 break;
5451 default:
5452 pf->num_lan_msix = min_t(int, (vec / 2),
5453 pf->num_lan_qps);
5454 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
5455 I40E_DEFAULT_NUM_VMDQ_VSI);
5456 break;
5457 }
5458 }
5459
5460 return err;
5461}
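
/* To make the vector budget arithmetic above concrete, a small standalone
 * sketch with made-up counts (illustrative only, not real hardware limits).
 */
#include <stdio.h>

int main(void)
{
	/* made-up example values */
	int num_lan_qps = 8, num_vmdq_vsis = 2, num_vmdq_qps = 4;
	int fdir_enabled = 1, hw_max_msix = 32;
	int v_budget;

	v_budget = 1 + num_lan_qps;			/* "other" + LAN queue pairs */
	v_budget += num_vmdq_vsis * num_vmdq_qps;	/* VMDq pairs */
	if (fdir_enabled)
		v_budget++;
	if (v_budget > hw_max_msix)			/* scale down to the HW limit */
		v_budget = hw_max_msix;

	printf("requesting %d MSI-X vectors\n", v_budget);	/* prints 18 */
	return 0;
}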
5462
5463/**
5464 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
5465 * @vsi: the VSI being configured
5466 * @v_idx: index of the vector in the vsi struct
5467 *
5468 * We allocate one q_vector. If allocation fails we return -ENOMEM.
5469 **/
5470static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5471{
5472 struct i40e_q_vector *q_vector;
5473
5474 /* allocate q_vector */
5475 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
5476 if (!q_vector)
5477 return -ENOMEM;
5478
5479 q_vector->vsi = vsi;
5480 q_vector->v_idx = v_idx;
5481 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
5482 if (vsi->netdev)
5483 netif_napi_add(vsi->netdev, &q_vector->napi,
5484 i40e_napi_poll, vsi->work_limit);
5485
5486 q_vector->rx.latency_range = I40E_LOW_LATENCY;
5487 q_vector->tx.latency_range = I40E_LOW_LATENCY;
5488
5489 /* tie q_vector and vsi together */
5490 vsi->q_vectors[v_idx] = q_vector;
5491
5492 return 0;
5493}
5494
5495/**
5496 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
5497 * @vsi: the VSI being configured
5498 *
5499 * We allocate one q_vector per queue interrupt. If allocation fails we
5500 * return -ENOMEM.
5501 **/
5502static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5503{
5504 struct i40e_pf *pf = vsi->back;
5505 int v_idx, num_q_vectors;
493fb300 5506 int err;
5507
5508 /* if not MSIX, give the one vector only to the LAN VSI */
5509 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5510 num_q_vectors = vsi->num_q_vectors;
5511 else if (vsi == pf->vsi[pf->lan_vsi])
5512 num_q_vectors = 1;
5513 else
5514 return -EINVAL;
5515
41c445ff 5516 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
5517 err = i40e_alloc_q_vector(vsi, v_idx);
5518 if (err)
5519 goto err_out;
5520 }
5521
5522 return 0;
5523
5524err_out:
5525 while (v_idx--)
5526 i40e_free_q_vector(vsi, v_idx);
5527
5528 return err;
5529}
5530
5531/**
5532 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
5533 * @pf: board private structure to initialize
5534 **/
5535static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5536{
5537 int err = 0;
5538
5539 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5540 err = i40e_init_msix(pf);
5541 if (err) {
5542 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
5543 I40E_FLAG_RSS_ENABLED |
5544 I40E_FLAG_MQ_ENABLED |
5545 I40E_FLAG_DCB_ENABLED |
5546 I40E_FLAG_SRIOV_ENABLED |
5547 I40E_FLAG_FDIR_ENABLED |
5548 I40E_FLAG_FDIR_ATR_ENABLED |
5549 I40E_FLAG_VMDQ_ENABLED);
5550
5551 /* rework the queue expectations without MSIX */
5552 i40e_determine_queue_usage(pf);
5553 }
5554 }
5555
5556 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5557 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
 5558		dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
5559 err = pci_enable_msi(pf->pdev);
5560 if (err) {
 5561			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
5562 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5563 }
5564 }
5565
5566 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
5567 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
5568
5569 /* track first vector for misc interrupts */
5570 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5571}
5572
5573/**
5574 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
5575 * @pf: board private structure
5576 *
5577 * This sets up the handler for MSIX 0, which is used to manage the
5578 * non-queue interrupts, e.g. AdminQ and errors. This is not used
5579 * when in MSI or Legacy interrupt mode.
5580 **/
5581static int i40e_setup_misc_vector(struct i40e_pf *pf)
5582{
5583 struct i40e_hw *hw = &pf->hw;
5584 int err = 0;
5585
5586 /* Only request the irq if this is the first time through, and
5587 * not when we're rebuilding after a Reset
5588 */
5589 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
5590 err = request_irq(pf->msix_entries[0].vector,
5591 i40e_intr, 0, pf->misc_int_name, pf);
5592 if (err) {
5593 dev_info(&pf->pdev->dev,
5594 "request_irq for msix_misc failed: %d\n", err);
5595 return -EFAULT;
5596 }
5597 }
5598
5599 i40e_enable_misc_int_causes(hw);
5600
5601 /* associate no queues to the misc vector */
5602 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
5603 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
5604
5605 i40e_flush(hw);
5606
5607 i40e_irq_dynamic_enable_icr0(pf);
5608
5609 return err;
5610}
5611
5612/**
5613 * i40e_config_rss - Prepare for RSS if used
5614 * @pf: board private structure
5615 **/
5616static int i40e_config_rss(struct i40e_pf *pf)
5617{
5618 struct i40e_hw *hw = &pf->hw;
5619 u32 lut = 0;
5620 int i, j;
5621 u64 hena;
5622 /* Set of random keys generated using kernel random number generator */
5623 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
5624 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
5625 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
5626 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
5627
5628 /* Fill out hash function seed */
5629 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5630 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
5631
5632 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
5633 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
5634 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
5635 hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
5636 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
5637 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
5638 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
5639 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
5640 ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
5641 ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
5642 ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
5643 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
5644 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
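	/* the hash-enable bitmap is 64 bits wide but the HW exposes it as
	 * two 32-bit registers, hence the split writes below
	 */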
5645 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
5646 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
5647
5648 /* Populate the LUT with max no. of queues in round robin fashion */
5649 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
5650
5651 /* The assumption is that lan qp count will be the highest
5652 * qp count for any PF VSI that needs RSS.
5653 * If multiple VSIs need RSS support, all the qp counts
5654 * for those VSIs should be a power of 2 for RSS to work.
5655 * If LAN VSI is the only consumer for RSS then this requirement
5656 * is not necessary.
5657 */
5658 if (j == pf->rss_size)
5659 j = 0;
5660 /* lut = 4-byte sliding window of 4 lut entries */
5661 lut = (lut << 8) | (j &
5662 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
5663 /* On i = 3, we have 4 entries in lut; write to the register */
5664 if ((i & 3) == 3)
5665 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
5666 }
5667 i40e_flush(hw);
5668
5669 return 0;
5670}
5671
5672/**
5673 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
5674 * @pf: board private structure to initialize
5675 *
5676 * i40e_sw_init initializes the Adapter private data structure.
5677 * Fields are initialized based on PCI device information and
5678 * OS network device settings (MTU size).
5679 **/
5680static int i40e_sw_init(struct i40e_pf *pf)
5681{
5682 int err = 0;
5683 int size;
5684
5685 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5686 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
 5687	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
5688 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5689 if (I40E_DEBUG_USER & debug)
5690 pf->hw.debug_mask = debug;
5691 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5692 I40E_DEFAULT_MSG_ENABLE);
5693 }
5694
5695 /* Set default capability flags */
5696 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5697 I40E_FLAG_MSI_ENABLED |
5698 I40E_FLAG_MSIX_ENABLED |
5699 I40E_FLAG_RX_PS_ENABLED |
5700 I40E_FLAG_MQ_ENABLED |
5701 I40E_FLAG_RX_1BUF_ENABLED;
5702
5703 /* Depending on PF configurations, it is possible that the RSS
5704 * maximum might end up larger than the available queues
5705 */
 5706	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
5707 pf->rss_size_max = min_t(int, pf->rss_size_max,
5708 pf->hw.func_caps.num_tx_qp);
5709 if (pf->hw.func_caps.rss) {
5710 pf->flags |= I40E_FLAG_RSS_ENABLED;
5711 pf->rss_size = min_t(int, pf->rss_size_max,
5712 nr_cpus_node(numa_node_id()));
5713 } else {
5714 pf->rss_size = 1;
5715 }
5716
5717 if (pf->hw.func_caps.dcb)
5718 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5719 else
5720 pf->num_tc_qps = 0;
5721
5722 if (pf->hw.func_caps.fd) {
5723 /* FW/NVM is not yet fixed in this regard */
5724 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5725 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5726 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5727 dev_info(&pf->pdev->dev,
5728 "Flow Director ATR mode Enabled\n");
5729 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5730 dev_info(&pf->pdev->dev,
5731 "Flow Director Side Band mode Enabled\n");
5732 pf->fdir_pf_filter_count =
5733 pf->hw.func_caps.fd_filters_guaranteed;
5734 }
5735 } else {
5736 pf->fdir_pf_filter_count = 0;
5737 }
5738
5739 if (pf->hw.func_caps.vmdq) {
5740 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5741 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5742 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5743 }
5744
5745 /* MFP mode enabled */
5746 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5747 pf->flags |= I40E_FLAG_MFP_ENABLED;
5748 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5749 }
5750
5751#ifdef CONFIG_PCI_IOV
5752 if (pf->hw.func_caps.num_vfs) {
5753 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5754 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5755 pf->num_req_vfs = min_t(int,
5756 pf->hw.func_caps.num_vfs,
5757 I40E_MAX_VF_COUNT);
5758 }
5759#endif /* CONFIG_PCI_IOV */
5760 pf->eeprom_version = 0xDEAD;
5761 pf->lan_veb = I40E_NO_VEB;
5762 pf->lan_vsi = I40E_NO_VSI;
5763
5764 /* set up queue assignment tracking */
5765 size = sizeof(struct i40e_lump_tracking)
5766 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
5767 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5768 if (!pf->qp_pile) {
5769 err = -ENOMEM;
5770 goto sw_init_done;
5771 }
5772 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5773 pf->qp_pile->search_hint = 0;
5774
5775 /* set up vector assignment tracking */
5776 size = sizeof(struct i40e_lump_tracking)
5777 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5778 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5779 if (!pf->irq_pile) {
5780 kfree(pf->qp_pile);
5781 err = -ENOMEM;
5782 goto sw_init_done;
5783 }
5784 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5785 pf->irq_pile->search_hint = 0;
5786
5787 mutex_init(&pf->switch_mutex);
5788
5789sw_init_done:
5790 return err;
5791}
5792
5793/**
5794 * i40e_set_features - set the netdev feature flags
5795 * @netdev: ptr to the netdev being adjusted
5796 * @features: the feature set that the stack is suggesting
5797 **/
5798static int i40e_set_features(struct net_device *netdev,
5799 netdev_features_t features)
5800{
5801 struct i40e_netdev_priv *np = netdev_priv(netdev);
5802 struct i40e_vsi *vsi = np->vsi;
5803
5804 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5805 i40e_vlan_stripping_enable(vsi);
5806 else
5807 i40e_vlan_stripping_disable(vsi);
5808
5809 return 0;
5810}
5811
5812static const struct net_device_ops i40e_netdev_ops = {
5813 .ndo_open = i40e_open,
5814 .ndo_stop = i40e_close,
5815 .ndo_start_xmit = i40e_lan_xmit_frame,
5816 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
5817 .ndo_set_rx_mode = i40e_set_rx_mode,
5818 .ndo_validate_addr = eth_validate_addr,
5819 .ndo_set_mac_address = i40e_set_mac,
5820 .ndo_change_mtu = i40e_change_mtu,
5821 .ndo_tx_timeout = i40e_tx_timeout,
5822 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
5823 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
5824#ifdef CONFIG_NET_POLL_CONTROLLER
5825 .ndo_poll_controller = i40e_netpoll,
5826#endif
5827 .ndo_setup_tc = i40e_setup_tc,
5828 .ndo_set_features = i40e_set_features,
5829 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
5830 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
5831 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
5832 .ndo_get_vf_config = i40e_ndo_get_vf_config,
5833};
5834
5835/**
5836 * i40e_config_netdev - Setup the netdev flags
5837 * @vsi: the VSI being configured
5838 *
5839 * Returns 0 on success, negative value on failure
5840 **/
5841static int i40e_config_netdev(struct i40e_vsi *vsi)
5842{
5843 struct i40e_pf *pf = vsi->back;
5844 struct i40e_hw *hw = &pf->hw;
5845 struct i40e_netdev_priv *np;
5846 struct net_device *netdev;
5847 u8 mac_addr[ETH_ALEN];
5848 int etherdev_size;
5849
5850 etherdev_size = sizeof(struct i40e_netdev_priv);
5851 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
5852 if (!netdev)
5853 return -ENOMEM;
5854
5855 vsi->netdev = netdev;
5856 np = netdev_priv(netdev);
5857 np->vsi = vsi;
5858
5859 netdev->hw_enc_features = NETIF_F_IP_CSUM |
5860 NETIF_F_GSO_UDP_TUNNEL |
5861 NETIF_F_TSO |
5862 NETIF_F_SG;
5863
5864 netdev->features = NETIF_F_SG |
5865 NETIF_F_IP_CSUM |
5866 NETIF_F_SCTP_CSUM |
5867 NETIF_F_HIGHDMA |
5868 NETIF_F_GSO_UDP_TUNNEL |
5869 NETIF_F_HW_VLAN_CTAG_TX |
5870 NETIF_F_HW_VLAN_CTAG_RX |
5871 NETIF_F_HW_VLAN_CTAG_FILTER |
5872 NETIF_F_IPV6_CSUM |
5873 NETIF_F_TSO |
5874 NETIF_F_TSO6 |
5875 NETIF_F_RXCSUM |
5876 NETIF_F_RXHASH |
5877 0;
5878
5879 /* copy netdev features into list of user selectable features */
5880 netdev->hw_features |= netdev->features;
5881
5882 if (vsi->type == I40E_VSI_MAIN) {
5883 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
5884 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
5885 } else {
5886 /* relate the VSI_VMDQ name to the VSI_MAIN name */
5887 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
5888 pf->vsi[pf->lan_vsi]->netdev->name);
5889 random_ether_addr(mac_addr);
5890 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
5891 }
5892
5893 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
5894 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
5895 /* vlan gets same features (except vlan offload)
5896 * after any tweaks for specific VSI types
5897 */
5898 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
5899 NETIF_F_HW_VLAN_CTAG_RX |
5900 NETIF_F_HW_VLAN_CTAG_FILTER);
5901 netdev->priv_flags |= IFF_UNICAST_FLT;
5902 netdev->priv_flags |= IFF_SUPP_NOFCS;
5903 /* Setup netdev TC information */
5904 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
5905
5906 netdev->netdev_ops = &i40e_netdev_ops;
5907 netdev->watchdog_timeo = 5 * HZ;
5908 i40e_set_ethtool_ops(netdev);
5909
5910 return 0;
5911}
5912
5913/**
5914 * i40e_vsi_delete - Delete a VSI from the switch
5915 * @vsi: the VSI being removed
5916 *
 5917 * No return value; the VSI element is simply removed from the HW switch.
5918 **/
5919static void i40e_vsi_delete(struct i40e_vsi *vsi)
5920{
 5921	/* removing the default VSI is not allowed */
5922 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
5923 return;
5924
5925 /* there is no HW VSI for FDIR */
5926 if (vsi->type == I40E_VSI_FDIR)
5927 return;
5928
5929 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
5930 return;
5931}
5932
5933/**
5934 * i40e_add_vsi - Add a VSI to the switch
5935 * @vsi: the VSI being configured
5936 *
5937 * This initializes a VSI context depending on the VSI type to be added and
5938 * passes it down to the add_vsi aq command.
5939 **/
5940static int i40e_add_vsi(struct i40e_vsi *vsi)
5941{
5942 int ret = -ENODEV;
5943 struct i40e_mac_filter *f, *ftmp;
5944 struct i40e_pf *pf = vsi->back;
5945 struct i40e_hw *hw = &pf->hw;
5946 struct i40e_vsi_context ctxt;
5947 u8 enabled_tc = 0x1; /* TC0 enabled */
5948 int f_count = 0;
5949
5950 memset(&ctxt, 0, sizeof(ctxt));
5951 switch (vsi->type) {
5952 case I40E_VSI_MAIN:
5953 /* The PF's main VSI is already setup as part of the
5954 * device initialization, so we'll not bother with
5955 * the add_vsi call, but we will retrieve the current
5956 * VSI context.
5957 */
5958 ctxt.seid = pf->main_vsi_seid;
5959 ctxt.pf_num = pf->hw.pf_id;
5960 ctxt.vf_num = 0;
5961 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5962 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5963 if (ret) {
5964 dev_info(&pf->pdev->dev,
5965 "couldn't get pf vsi config, err %d, aq_err %d\n",
5966 ret, pf->hw.aq.asq_last_status);
5967 return -ENOENT;
5968 }
5969 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5970 vsi->info.valid_sections = 0;
5971
5972 vsi->seid = ctxt.seid;
5973 vsi->id = ctxt.vsi_number;
5974
5975 enabled_tc = i40e_pf_get_tc_map(pf);
5976
5977 /* MFP mode setup queue map and update VSI */
5978 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5979 memset(&ctxt, 0, sizeof(ctxt));
5980 ctxt.seid = pf->main_vsi_seid;
5981 ctxt.pf_num = pf->hw.pf_id;
5982 ctxt.vf_num = 0;
5983 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5984 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5985 if (ret) {
5986 dev_info(&pf->pdev->dev,
5987 "update vsi failed, aq_err=%d\n",
5988 pf->hw.aq.asq_last_status);
5989 ret = -ENOENT;
5990 goto err;
5991 }
5992 /* update the local VSI info queue map */
5993 i40e_vsi_update_queue_map(vsi, &ctxt);
5994 vsi->info.valid_sections = 0;
5995 } else {
 5996			/* Default/Main VSI is only enabled for TC0;
5997 * reconfigure it to enable all TCs that are
5998 * available on the port in SFP mode.
5999 */
6000 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6001 if (ret) {
6002 dev_info(&pf->pdev->dev,
6003 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
6004 enabled_tc, ret,
6005 pf->hw.aq.asq_last_status);
6006 ret = -ENOENT;
6007 }
6008 }
6009 break;
6010
6011 case I40E_VSI_FDIR:
6012 /* no queue mapping or actual HW VSI needed */
6013 vsi->info.valid_sections = 0;
6014 vsi->seid = 0;
6015 vsi->id = 0;
6016 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6017 return 0;
6018 break;
6019
6020 case I40E_VSI_VMDQ2:
6021 ctxt.pf_num = hw->pf_id;
6022 ctxt.vf_num = 0;
6023 ctxt.uplink_seid = vsi->uplink_seid;
6024 ctxt.connection_type = 0x1; /* regular data port */
6025 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6026
6027 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6028
6029 /* This VSI is connected to VEB so the switch_id
6030 * should be set to zero by default.
6031 */
6032 ctxt.info.switch_id = 0;
6033 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6034 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6035
6036 /* Setup the VSI tx/rx queue map for TC0 only for now */
6037 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6038 break;
6039
6040 case I40E_VSI_SRIOV:
6041 ctxt.pf_num = hw->pf_id;
6042 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
6043 ctxt.uplink_seid = vsi->uplink_seid;
6044 ctxt.connection_type = 0x1; /* regular data port */
6045 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6046
6047 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6048
6049 /* This VSI is connected to VEB so the switch_id
6050 * should be set to zero by default.
6051 */
6052 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6053
6054 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6055 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6056 /* Setup the VSI tx/rx queue map for TC0 only for now */
6057 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6058 break;
6059
6060 default:
6061 return -ENODEV;
6062 }
6063
6064 if (vsi->type != I40E_VSI_MAIN) {
6065 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6066 if (ret) {
6067 dev_info(&vsi->back->pdev->dev,
6068 "add vsi failed, aq_err=%d\n",
6069 vsi->back->hw.aq.asq_last_status);
6070 ret = -ENOENT;
6071 goto err;
6072 }
6073 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6074 vsi->info.valid_sections = 0;
6075 vsi->seid = ctxt.seid;
6076 vsi->id = ctxt.vsi_number;
6077 }
6078
6079 /* If macvlan filters already exist, force them to get loaded */
6080 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
6081 f->changed = true;
6082 f_count++;
6083 }
6084 if (f_count) {
6085 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
6086 pf->flags |= I40E_FLAG_FILTER_SYNC;
6087 }
6088
6089 /* Update VSI BW information */
6090 ret = i40e_vsi_get_bw_info(vsi);
6091 if (ret) {
6092 dev_info(&pf->pdev->dev,
6093 "couldn't get vsi bw info, err %d, aq_err %d\n",
6094 ret, pf->hw.aq.asq_last_status);
6095 /* VSI is already added so not tearing that up */
6096 ret = 0;
6097 }
6098
6099err:
6100 return ret;
6101}
6102
6103/**
6104 * i40e_vsi_release - Delete a VSI and free its resources
6105 * @vsi: the VSI being removed
6106 *
6107 * Returns 0 on success or < 0 on error
6108 **/
6109int i40e_vsi_release(struct i40e_vsi *vsi)
6110{
6111 struct i40e_mac_filter *f, *ftmp;
6112 struct i40e_veb *veb = NULL;
6113 struct i40e_pf *pf;
6114 u16 uplink_seid;
6115 int i, n;
6116
6117 pf = vsi->back;
6118
6119 /* release of a VEB-owner or last VSI is not allowed */
6120 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
6121 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
6122 vsi->seid, vsi->uplink_seid);
6123 return -ENODEV;
6124 }
6125 if (vsi == pf->vsi[pf->lan_vsi] &&
6126 !test_bit(__I40E_DOWN, &pf->state)) {
6127 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
6128 return -ENODEV;
6129 }
6130
6131 uplink_seid = vsi->uplink_seid;
6132 if (vsi->type != I40E_VSI_SRIOV) {
6133 if (vsi->netdev_registered) {
6134 vsi->netdev_registered = false;
6135 if (vsi->netdev) {
6136 /* results in a call to i40e_close() */
6137 unregister_netdev(vsi->netdev);
6138 free_netdev(vsi->netdev);
6139 vsi->netdev = NULL;
6140 }
6141 } else {
6142 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
6143 i40e_down(vsi);
6144 i40e_vsi_free_irq(vsi);
6145 i40e_vsi_free_tx_resources(vsi);
6146 i40e_vsi_free_rx_resources(vsi);
6147 }
6148 i40e_vsi_disable_irq(vsi);
6149 }
6150
6151 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
6152 i40e_del_filter(vsi, f->macaddr, f->vlan,
6153 f->is_vf, f->is_netdev);
6154 i40e_sync_vsi_filters(vsi);
6155
6156 i40e_vsi_delete(vsi);
6157 i40e_vsi_free_q_vectors(vsi);
6158 i40e_vsi_clear_rings(vsi);
6159 i40e_vsi_clear(vsi);
6160
6161 /* If this was the last thing on the VEB, except for the
6162 * controlling VSI, remove the VEB, which puts the controlling
6163 * VSI onto the next level down in the switch.
6164 *
6165 * Well, okay, there's one more exception here: don't remove
6166 * the orphan VEBs yet. We'll wait for an explicit remove request
6167 * from up the network stack.
6168 */
6169 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6170 if (pf->vsi[i] &&
6171 pf->vsi[i]->uplink_seid == uplink_seid &&
6172 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6173 n++; /* count the VSIs */
6174 }
6175 }
6176 for (i = 0; i < I40E_MAX_VEB; i++) {
6177 if (!pf->veb[i])
6178 continue;
6179 if (pf->veb[i]->uplink_seid == uplink_seid)
6180 n++; /* count the VEBs */
6181 if (pf->veb[i]->seid == uplink_seid)
6182 veb = pf->veb[i];
6183 }
6184 if (n == 0 && veb && veb->uplink_seid != 0)
6185 i40e_veb_release(veb);
6186
6187 return 0;
6188}
6189
6190/**
6191 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
6192 * @vsi: ptr to the VSI
6193 *
6194 * This should only be called after i40e_vsi_mem_alloc() which allocates the
6195 * corresponding SW VSI structure and initializes num_queue_pairs for the
6196 * newly allocated VSI.
6197 *
6198 * Returns 0 on success or negative on failure
6199 **/
6200static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6201{
6202 int ret = -ENOENT;
6203 struct i40e_pf *pf = vsi->back;
6204
 6205	if (vsi->q_vectors[0]) {
6206 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
6207 vsi->seid);
6208 return -EEXIST;
6209 }
6210
6211 if (vsi->base_vector) {
6212 dev_info(&pf->pdev->dev,
6213 "VSI %d has non-zero base vector %d\n",
6214 vsi->seid, vsi->base_vector);
6215 return -EEXIST;
6216 }
6217
6218 ret = i40e_alloc_q_vectors(vsi);
6219 if (ret) {
6220 dev_info(&pf->pdev->dev,
6221 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
6222 vsi->num_q_vectors, vsi->seid, ret);
6223 vsi->num_q_vectors = 0;
6224 goto vector_setup_out;
6225 }
6226
6227 if (vsi->num_q_vectors)
6228 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6229 vsi->num_q_vectors, vsi->idx);
6230 if (vsi->base_vector < 0) {
6231 dev_info(&pf->pdev->dev,
6232 "failed to get q tracking for VSI %d, err=%d\n",
6233 vsi->seid, vsi->base_vector);
6234 i40e_vsi_free_q_vectors(vsi);
6235 ret = -ENOENT;
6236 goto vector_setup_out;
6237 }
6238
6239vector_setup_out:
6240 return ret;
6241}
6242
6243/**
6244 * i40e_vsi_setup - Set up a VSI by a given type
6245 * @pf: board private structure
6246 * @type: VSI type
6247 * @uplink_seid: the switch element to link to
6248 * @param1: usage depends upon VSI type. For VF types, indicates VF id
6249 *
 6250 * This allocates the sw VSI structure and its queue resources, then adds a VSI
6251 * to the identified VEB.
6252 *
 6253 * Returns pointer to the successfully allocated and configured VSI sw struct on
6254 * success, otherwise returns NULL on failure.
6255 **/
6256struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6257 u16 uplink_seid, u32 param1)
6258{
6259 struct i40e_vsi *vsi = NULL;
6260 struct i40e_veb *veb = NULL;
6261 int ret, i;
6262 int v_idx;
6263
6264 /* The requested uplink_seid must be either
6265 * - the PF's port seid
6266 * no VEB is needed because this is the PF
6267 * or this is a Flow Director special case VSI
6268 * - seid of an existing VEB
6269 * - seid of a VSI that owns an existing VEB
6270 * - seid of a VSI that doesn't own a VEB
6271 * a new VEB is created and the VSI becomes the owner
6272 * - seid of the PF VSI, which is what creates the first VEB
6273 * this is a special case of the previous
6274 *
6275 * Find which uplink_seid we were given and create a new VEB if needed
6276 */
6277 for (i = 0; i < I40E_MAX_VEB; i++) {
6278 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6279 veb = pf->veb[i];
6280 break;
6281 }
6282 }
6283
6284 if (!veb && uplink_seid != pf->mac_seid) {
6285
6286 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6287 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6288 vsi = pf->vsi[i];
6289 break;
6290 }
6291 }
6292 if (!vsi) {
6293 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6294 uplink_seid);
6295 return NULL;
6296 }
6297
6298 if (vsi->uplink_seid == pf->mac_seid)
6299 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6300 vsi->tc_config.enabled_tc);
6301 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
6302 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
6303 vsi->tc_config.enabled_tc);
6304
6305 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
6306 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
6307 veb = pf->veb[i];
6308 }
6309 if (!veb) {
6310 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
6311 return NULL;
6312 }
6313
6314 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6315 uplink_seid = veb->seid;
6316 }
6317
6318 /* get vsi sw struct */
6319 v_idx = i40e_vsi_mem_alloc(pf, type);
6320 if (v_idx < 0)
6321 goto err_alloc;
6322 vsi = pf->vsi[v_idx];
6323 vsi->type = type;
6324 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
6325
6326 if (type == I40E_VSI_MAIN)
6327 pf->lan_vsi = v_idx;
6328 else if (type == I40E_VSI_SRIOV)
6329 vsi->vf_id = param1;
6330 /* assign it some queues */
6331 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6332 if (ret < 0) {
6333 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6334 vsi->seid, ret);
6335 goto err_vsi;
6336 }
6337 vsi->base_queue = ret;
6338
6339 /* get a VSI from the hardware */
6340 vsi->uplink_seid = uplink_seid;
6341 ret = i40e_add_vsi(vsi);
6342 if (ret)
6343 goto err_vsi;
6344
6345 switch (vsi->type) {
6346 /* setup the netdev if needed */
6347 case I40E_VSI_MAIN:
6348 case I40E_VSI_VMDQ2:
6349 ret = i40e_config_netdev(vsi);
6350 if (ret)
6351 goto err_netdev;
6352 ret = register_netdev(vsi->netdev);
6353 if (ret)
6354 goto err_netdev;
6355 vsi->netdev_registered = true;
6356 netif_carrier_off(vsi->netdev);
6357 /* fall through */
6358
6359 case I40E_VSI_FDIR:
6360 /* set up vectors and rings if needed */
6361 ret = i40e_vsi_setup_vectors(vsi);
6362 if (ret)
6363 goto err_msix;
6364
6365 ret = i40e_alloc_rings(vsi);
6366 if (ret)
6367 goto err_rings;
6368
6369 /* map all of the rings to the q_vectors */
6370 i40e_vsi_map_rings_to_vectors(vsi);
6371
6372 i40e_vsi_reset_stats(vsi);
6373 break;
6374
6375 default:
6376 /* no netdev or rings for the other VSI types */
6377 break;
6378 }
6379
6380 return vsi;
6381
6382err_rings:
6383 i40e_vsi_free_q_vectors(vsi);
6384err_msix:
6385 if (vsi->netdev_registered) {
6386 vsi->netdev_registered = false;
6387 unregister_netdev(vsi->netdev);
6388 free_netdev(vsi->netdev);
6389 vsi->netdev = NULL;
6390 }
6391err_netdev:
6392 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6393err_vsi:
6394 i40e_vsi_clear(vsi);
6395err_alloc:
6396 return NULL;
6397}
6398
6399/**
6400 * i40e_veb_get_bw_info - Query VEB BW information
6401 * @veb: the veb to query
6402 *
6403 * Query the Tx scheduler BW configuration data for given VEB
6404 **/
6405static int i40e_veb_get_bw_info(struct i40e_veb *veb)
6406{
6407 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
6408 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
6409 struct i40e_pf *pf = veb->pf;
6410 struct i40e_hw *hw = &pf->hw;
6411 u32 tc_bw_max;
6412 int ret = 0;
6413 int i;
6414
6415 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
6416 &bw_data, NULL);
6417 if (ret) {
6418 dev_info(&pf->pdev->dev,
6419 "query veb bw config failed, aq_err=%d\n",
6420 hw->aq.asq_last_status);
6421 goto out;
6422 }
6423
6424 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
6425 &ets_data, NULL);
6426 if (ret) {
6427 dev_info(&pf->pdev->dev,
6428 "query veb bw ets config failed, aq_err=%d\n",
6429 hw->aq.asq_last_status);
6430 goto out;
6431 }
6432
6433 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
6434 veb->bw_max_quanta = ets_data.tc_bw_max;
6435 veb->is_abs_credits = bw_data.absolute_credits_enable;
6436 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
6437 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
6438 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6439 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
6440 veb->bw_tc_limit_credits[i] =
6441 le16_to_cpu(bw_data.tc_bw_limits[i]);
6442 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
6443 }
6444
6445out:
6446 return ret;
6447}
6448
6449/**
6450 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
6451 * @pf: board private structure
6452 *
6453 * On error: returns error code (negative)
6454 * On success: returns vsi index in PF (positive)
6455 **/
6456static int i40e_veb_mem_alloc(struct i40e_pf *pf)
6457{
6458 int ret = -ENOENT;
6459 struct i40e_veb *veb;
6460 int i;
6461
6462 /* Need to protect the allocation of switch elements at the PF level */
6463 mutex_lock(&pf->switch_mutex);
6464
6465 /* VEB list may be fragmented if VEB creation/destruction has
6466 * been happening. We can afford to do a quick scan to look
6467 * for any free slots in the list.
6468 *
 6469	 * find the first empty veb slot
6470 */
6471 i = 0;
6472 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
6473 i++;
6474 if (i >= I40E_MAX_VEB) {
6475 ret = -ENOMEM;
6476 goto err_alloc_veb; /* out of VEB slots! */
6477 }
6478
6479 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
6480 if (!veb) {
6481 ret = -ENOMEM;
6482 goto err_alloc_veb;
6483 }
6484 veb->pf = pf;
6485 veb->idx = i;
6486 veb->enabled_tc = 1;
6487
6488 pf->veb[i] = veb;
6489 ret = i;
6490err_alloc_veb:
6491 mutex_unlock(&pf->switch_mutex);
6492 return ret;
6493}
6494
6495/**
6496 * i40e_switch_branch_release - Delete a branch of the switch tree
6497 * @branch: where to start deleting
6498 *
6499 * This uses recursion to find the tips of the branch to be
6500 * removed, deleting until we get back to and can delete this VEB.
6501 **/
6502static void i40e_switch_branch_release(struct i40e_veb *branch)
6503{
6504 struct i40e_pf *pf = branch->pf;
6505 u16 branch_seid = branch->seid;
6506 u16 veb_idx = branch->idx;
6507 int i;
6508
6509 /* release any VEBs on this VEB - RECURSION */
6510 for (i = 0; i < I40E_MAX_VEB; i++) {
6511 if (!pf->veb[i])
6512 continue;
6513 if (pf->veb[i]->uplink_seid == branch->seid)
6514 i40e_switch_branch_release(pf->veb[i]);
6515 }
6516
6517 /* Release the VSIs on this VEB, but not the owner VSI.
6518 *
6519 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
6520 * the VEB itself, so don't use (*branch) after this loop.
6521 */
6522 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6523 if (!pf->vsi[i])
6524 continue;
6525 if (pf->vsi[i]->uplink_seid == branch_seid &&
6526 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6527 i40e_vsi_release(pf->vsi[i]);
6528 }
6529 }
6530
6531 /* There's one corner case where the VEB might not have been
6532 * removed, so double check it here and remove it if needed.
6533 * This case happens if the veb was created from the debugfs
6534 * commands and no VSIs were added to it.
6535 */
6536 if (pf->veb[veb_idx])
6537 i40e_veb_release(pf->veb[veb_idx]);
6538}
6539
6540/**
6541 * i40e_veb_clear - remove veb struct
6542 * @veb: the veb to remove
6543 **/
6544static void i40e_veb_clear(struct i40e_veb *veb)
6545{
6546 if (!veb)
6547 return;
6548
6549 if (veb->pf) {
6550 struct i40e_pf *pf = veb->pf;
6551
6552 mutex_lock(&pf->switch_mutex);
6553 if (pf->veb[veb->idx] == veb)
6554 pf->veb[veb->idx] = NULL;
6555 mutex_unlock(&pf->switch_mutex);
6556 }
6557
6558 kfree(veb);
6559}
6560
6561/**
6562 * i40e_veb_release - Delete a VEB and free its resources
6563 * @veb: the VEB being removed
6564 **/
6565void i40e_veb_release(struct i40e_veb *veb)
6566{
6567 struct i40e_vsi *vsi = NULL;
6568 struct i40e_pf *pf;
6569 int i, n = 0;
6570
6571 pf = veb->pf;
6572
6573 /* find the remaining VSI and check for extras */
6574 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6575 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
6576 n++;
6577 vsi = pf->vsi[i];
6578 }
6579 }
6580 if (n != 1) {
6581 dev_info(&pf->pdev->dev,
6582 "can't remove VEB %d with %d VSIs left\n",
6583 veb->seid, n);
6584 return;
6585 }
6586
6587 /* move the remaining VSI to uplink veb */
6588 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
6589 if (veb->uplink_seid) {
6590 vsi->uplink_seid = veb->uplink_seid;
6591 if (veb->uplink_seid == pf->mac_seid)
6592 vsi->veb_idx = I40E_NO_VEB;
6593 else
6594 vsi->veb_idx = veb->veb_idx;
6595 } else {
6596 /* floating VEB */
6597 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6598 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
6599 }
6600
6601 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
6602 i40e_veb_clear(veb);
6603
6604 return;
6605}
6606
6607/**
6608 * i40e_add_veb - create the VEB in the switch
6609 * @veb: the VEB to be instantiated
6610 * @vsi: the controlling VSI
6611 **/
6612static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
6613{
6614 bool is_default = (vsi->idx == vsi->back->lan_vsi);
 6615	bool is_cloud = false;
6616 int ret;
6617
6618 /* get a VEB from the hardware */
6619 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
6620 veb->enabled_tc, is_default,
6621 is_cloud, &veb->seid, NULL);
6622 if (ret) {
6623 dev_info(&veb->pf->pdev->dev,
6624 "couldn't add VEB, err %d, aq_err %d\n",
6625 ret, veb->pf->hw.aq.asq_last_status);
6626 return -EPERM;
6627 }
6628
6629 /* get statistics counter */
6630 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
6631 &veb->stats_idx, NULL, NULL, NULL);
6632 if (ret) {
6633 dev_info(&veb->pf->pdev->dev,
6634 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
6635 ret, veb->pf->hw.aq.asq_last_status);
6636 return -EPERM;
6637 }
6638 ret = i40e_veb_get_bw_info(veb);
6639 if (ret) {
6640 dev_info(&veb->pf->pdev->dev,
6641 "couldn't get VEB bw info, err %d, aq_err %d\n",
6642 ret, veb->pf->hw.aq.asq_last_status);
6643 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
6644 return -ENOENT;
6645 }
6646
6647 vsi->uplink_seid = veb->seid;
6648 vsi->veb_idx = veb->idx;
6649 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6650
6651 return 0;
6652}
6653
6654/**
6655 * i40e_veb_setup - Set up a VEB
6656 * @pf: board private structure
6657 * @flags: VEB setup flags
6658 * @uplink_seid: the switch element to link to
6659 * @vsi_seid: the initial VSI seid
6660 * @enabled_tc: Enabled TC bit-map
6661 *
 6662 * This allocates the sw VEB structure and links it into the switch.
6663 * It is possible and legal for this to be a duplicate of an already
6664 * existing VEB. It is also possible for both uplink and vsi seids
6665 * to be zero, in order to create a floating VEB.
6666 *
6667 * Returns pointer to the successfully allocated VEB sw struct on
6668 * success, otherwise returns NULL on failure.
6669 **/
6670struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6671 u16 uplink_seid, u16 vsi_seid,
6672 u8 enabled_tc)
6673{
6674 struct i40e_veb *veb, *uplink_veb = NULL;
6675 int vsi_idx, veb_idx;
6676 int ret;
6677
6678 /* if one seid is 0, the other must be 0 to create a floating relay */
6679 if ((uplink_seid == 0 || vsi_seid == 0) &&
6680 (uplink_seid + vsi_seid != 0)) {
6681 dev_info(&pf->pdev->dev,
6682 "one, not both seid's are 0: uplink=%d vsi=%d\n",
6683 uplink_seid, vsi_seid);
6684 return NULL;
6685 }
6686
6687 /* make sure there is such a vsi and uplink */
6688 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6689 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6690 break;
6691 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6692 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6693 vsi_seid);
6694 return NULL;
6695 }
6696
6697 if (uplink_seid && uplink_seid != pf->mac_seid) {
6698 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6699 if (pf->veb[veb_idx] &&
6700 pf->veb[veb_idx]->seid == uplink_seid) {
6701 uplink_veb = pf->veb[veb_idx];
6702 break;
6703 }
6704 }
6705 if (!uplink_veb) {
6706 dev_info(&pf->pdev->dev,
6707 "uplink seid %d not found\n", uplink_seid);
6708 return NULL;
6709 }
6710 }
6711
6712 /* get veb sw struct */
6713 veb_idx = i40e_veb_mem_alloc(pf);
6714 if (veb_idx < 0)
6715 goto err_alloc;
6716 veb = pf->veb[veb_idx];
6717 veb->flags = flags;
6718 veb->uplink_seid = uplink_seid;
6719 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
6720 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
6721
6722 /* create the VEB in the switch */
6723 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
6724 if (ret)
6725 goto err_veb;
6726
6727 return veb;
6728
6729err_veb:
6730 i40e_veb_clear(veb);
6731err_alloc:
6732 return NULL;
6733}
6734
6735/**
6736 * i40e_setup_pf_switch_element - set pf vars based on switch type
6737 * @pf: board private structure
6738 * @ele: element we are building info from
6739 * @num_reported: total number of elements
6740 * @printconfig: should we print the contents
6741 *
6742 * helper function to assist in extracting a few useful SEID values.
6743 **/
6744static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
6745 struct i40e_aqc_switch_config_element_resp *ele,
6746 u16 num_reported, bool printconfig)
6747{
6748 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
6749 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
6750 u8 element_type = ele->element_type;
6751 u16 seid = le16_to_cpu(ele->seid);
6752
6753 if (printconfig)
6754 dev_info(&pf->pdev->dev,
6755 "type=%d seid=%d uplink=%d downlink=%d\n",
6756 element_type, seid, uplink_seid, downlink_seid);
6757
6758 switch (element_type) {
6759 case I40E_SWITCH_ELEMENT_TYPE_MAC:
6760 pf->mac_seid = seid;
6761 break;
6762 case I40E_SWITCH_ELEMENT_TYPE_VEB:
6763 /* Main VEB? */
6764 if (uplink_seid != pf->mac_seid)
6765 break;
6766 if (pf->lan_veb == I40E_NO_VEB) {
6767 int v;
6768
6769 /* find existing or else empty VEB */
6770 for (v = 0; v < I40E_MAX_VEB; v++) {
6771 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
6772 pf->lan_veb = v;
6773 break;
6774 }
6775 }
6776 if (pf->lan_veb == I40E_NO_VEB) {
6777 v = i40e_veb_mem_alloc(pf);
6778 if (v < 0)
6779 break;
6780 pf->lan_veb = v;
6781 }
6782 }
6783
6784 pf->veb[pf->lan_veb]->seid = seid;
6785 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
6786 pf->veb[pf->lan_veb]->pf = pf;
6787 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
6788 break;
6789 case I40E_SWITCH_ELEMENT_TYPE_VSI:
6790 if (num_reported != 1)
6791 break;
6792 /* This is immediately after a reset so we can assume this is
6793 * the PF's VSI
6794 */
6795 pf->mac_seid = uplink_seid;
6796 pf->pf_seid = downlink_seid;
6797 pf->main_vsi_seid = seid;
6798 if (printconfig)
6799 dev_info(&pf->pdev->dev,
6800 "pf_seid=%d main_vsi_seid=%d\n",
6801 pf->pf_seid, pf->main_vsi_seid);
6802 break;
6803 case I40E_SWITCH_ELEMENT_TYPE_PF:
6804 case I40E_SWITCH_ELEMENT_TYPE_VF:
6805 case I40E_SWITCH_ELEMENT_TYPE_EMP:
6806 case I40E_SWITCH_ELEMENT_TYPE_BMC:
6807 case I40E_SWITCH_ELEMENT_TYPE_PE:
6808 case I40E_SWITCH_ELEMENT_TYPE_PA:
6809 /* ignore these for now */
6810 break;
6811 default:
6812 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
6813 element_type, seid);
6814 break;
6815 }
6816}
6817
6818/**
6819 * i40e_fetch_switch_configuration - Get switch config from firmware
6820 * @pf: board private structure
6821 * @printconfig: should we print the contents
6822 *
6823 * Get the current switch configuration from the device and
6824 * extract a few useful SEID values.
6825 **/
6826int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
6827{
6828 struct i40e_aqc_get_switch_config_resp *sw_config;
6829 u16 next_seid = 0;
6830 int ret = 0;
6831 u8 *aq_buf;
6832 int i;
6833
6834 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
6835 if (!aq_buf)
6836 return -ENOMEM;
6837
6838 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
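	/* the switch configuration can span several AQ responses; next_seid
	 * tells the firmware where to resume, and 0 on return means the
	 * whole configuration has been reported
	 */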
6839 do {
6840 u16 num_reported, num_total;
6841
6842 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
6843 I40E_AQ_LARGE_BUF,
6844 &next_seid, NULL);
6845 if (ret) {
6846 dev_info(&pf->pdev->dev,
6847 "get switch config failed %d aq_err=%x\n",
6848 ret, pf->hw.aq.asq_last_status);
6849 kfree(aq_buf);
6850 return -ENOENT;
6851 }
6852
6853 num_reported = le16_to_cpu(sw_config->header.num_reported);
6854 num_total = le16_to_cpu(sw_config->header.num_total);
6855
6856 if (printconfig)
6857 dev_info(&pf->pdev->dev,
6858 "header: %d reported %d total\n",
6859 num_reported, num_total);
6860
6861 if (num_reported) {
6862 int sz = sizeof(*sw_config) * num_reported;
6863
6864 kfree(pf->sw_config);
6865 pf->sw_config = kzalloc(sz, GFP_KERNEL);
6866 if (pf->sw_config)
6867 memcpy(pf->sw_config, sw_config, sz);
6868 }
6869
6870 for (i = 0; i < num_reported; i++) {
6871 struct i40e_aqc_switch_config_element_resp *ele =
6872 &sw_config->element[i];
6873
6874 i40e_setup_pf_switch_element(pf, ele, num_reported,
6875 printconfig);
6876 }
6877 } while (next_seid != 0);
6878
6879 kfree(aq_buf);
6880 return ret;
6881}
6882
6883/**
6884 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
6885 * @pf: board private structure
6886 *
6887 * Returns 0 on success, negative value on failure
6888 **/
6889static int i40e_setup_pf_switch(struct i40e_pf *pf)
6890{
6891 int ret;
6892
6893 /* find out what's out there already */
6894 ret = i40e_fetch_switch_configuration(pf, false);
6895 if (ret) {
6896 dev_info(&pf->pdev->dev,
6897 "couldn't fetch switch config, err %d, aq_err %d\n",
6898 ret, pf->hw.aq.asq_last_status);
6899 return ret;
6900 }
6901 i40e_pf_reset_stats(pf);
6902
6903 /* fdir VSI must happen first to be sure it gets queue 0, but only
6904 * if there is enough room for the fdir VSI
6905 */
6906 if (pf->num_lan_qps > 1)
6907 i40e_fdir_setup(pf);
6908
6909 /* first time setup */
6910 if (pf->lan_vsi == I40E_NO_VSI) {
6911 struct i40e_vsi *vsi = NULL;
6912 u16 uplink_seid;
6913
6914 /* Set up the PF VSI associated with the PF's main VSI
6915 * that is already in the HW switch
6916 */
6917 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
6918 uplink_seid = pf->veb[pf->lan_veb]->seid;
6919 else
6920 uplink_seid = pf->mac_seid;
6921
6922 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
6923 if (!vsi) {
6924 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
6925 i40e_fdir_teardown(pf);
6926 return -EAGAIN;
6927 }
6928 /* accommodate kcompat by copying the main VSI queue count
6929 * into the pf, since this newer code pushes the pf queue
6930 * info down a level into a VSI
6931 */
6932 pf->num_rx_queues = vsi->alloc_queue_pairs;
6933 pf->num_tx_queues = vsi->alloc_queue_pairs;
6934 } else {
6935 /* force a reset of TC and queue layout configurations */
6936 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6937 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6938 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6939 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6940 }
6941 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
6942
6943 /* Setup static PF queue filter control settings */
6944 ret = i40e_setup_pf_filter_control(pf);
6945 if (ret) {
6946 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
6947 ret);
6948 /* Failure here should not stop continuing other steps */
6949 }
6950
6951 /* enable RSS in the HW, even for only one queue, as the stack can use
6952 * the hash
6953 */
6954 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
6955 i40e_config_rss(pf);
6956
6957 /* fill in link information and enable LSE reporting */
6958 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
6959 i40e_link_event(pf);
6960
 6961	/* Initialize user-specific link properties */
6962 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
6963 I40E_AQ_AN_COMPLETED) ? true : false);
6964 pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
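	/* derive the current flow control mode from the negotiated pause
	 * bits: both set means full pause, otherwise Tx-only or Rx-only,
	 * else fall back to the default
	 */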
6965 if (pf->hw.phy.link_info.an_info &
6966 (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
6967 pf->hw.fc.current_mode = I40E_FC_FULL;
6968 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
6969 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
6970 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
6971 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
6972 else
6973 pf->hw.fc.current_mode = I40E_FC_DEFAULT;
6974
6975 return ret;
6976}
6977
6978/**
6979 * i40e_set_rss_size - helper to set rss_size
6980 * @pf: board private structure
6981 * @queues_left: how many queues
6982 */
6983static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
6984{
6985 int num_tc0;
6986
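	/* cap by the queues on offer, the HW RSS maximum, and the local
	 * CPU count, then round down to a power of two so the RSS LUT
	 * spreads evenly
	 */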
6987 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
6988 num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
6989 num_tc0 = rounddown_pow_of_two(num_tc0);
6990
6991 return num_tc0;
6992}
6993
6994/**
6995 * i40e_determine_queue_usage - Work out queue distribution
6996 * @pf: board private structure
6997 **/
6998static void i40e_determine_queue_usage(struct i40e_pf *pf)
6999{
7000 int accum_tc_size;
7001 int queues_left;
7002
7003 pf->num_lan_qps = 0;
7004 pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
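	/* accum_tc_size is the block of queues reserved for TCs 1-7 when
	 * DCB is enabled; TC0 keeps the remainder for the RSS pool
	 */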
7005 accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
7006
7007 /* Find the max queues to be put into basic use. We'll always be
7008 * using TC0, whether or not DCB is running, and TC0 will get the
7009 * big RSS set.
7010 */
7011 queues_left = pf->hw.func_caps.num_tx_qp;
7012
7013 if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7014 (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
7015 !(pf->flags & (I40E_FLAG_RSS_ENABLED |
7016 I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
7017 (queues_left == 1)) {
7018
7019 /* one qp for PF, no queues for anything else */
7020 queues_left = 0;
7021 pf->rss_size = pf->num_lan_qps = 1;
7022
7023 /* make sure all the fancies are disabled */
7024 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
7025 I40E_FLAG_MQ_ENABLED |
7026 I40E_FLAG_FDIR_ENABLED |
7027 I40E_FLAG_FDIR_ATR_ENABLED |
7028 I40E_FLAG_DCB_ENABLED |
7029 I40E_FLAG_SRIOV_ENABLED |
7030 I40E_FLAG_VMDQ_ENABLED);
7031
7032 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
7033 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
7034 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7035
7036 pf->rss_size = i40e_set_rss_size(pf, queues_left);
7037
7038 queues_left -= pf->rss_size;
7039 pf->num_lan_qps = pf->rss_size;
7040
7041 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
7042 !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
7043 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
7044
7045 /* save num_tc_qps queues for TCs 1 thru 7 and the rest
7046 * are set up for RSS in TC0
7047 */
7048 queues_left -= accum_tc_size;
7049
7050 pf->rss_size = i40e_set_rss_size(pf, queues_left);
7051
7052 queues_left -= pf->rss_size;
7053 if (queues_left < 0) {
7054 dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
7055 return;
7056 }
7057
7058 pf->num_lan_qps = pf->rss_size + accum_tc_size;
7059
7060 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
7061 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
7062 !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
7063
7064 queues_left -= 1; /* save 1 queue for FD */
7065
7066 pf->rss_size = i40e_set_rss_size(pf, queues_left);
7067
7068 queues_left -= pf->rss_size;
7069 if (queues_left < 0) {
7070 dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
7071 return;
7072 }
7073
7074 pf->num_lan_qps = pf->rss_size;
7075
7076 } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
7077 (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
7078 (pf->flags & I40E_FLAG_DCB_ENABLED)) {
7079
 7080		/* save 1 queue for flow director,
 7081		 * num_tc_qps queues each for TCs 1 thru 7,
7082 * and the rest are set up for RSS in TC0
7083 */
7084 queues_left -= 1;
7085 queues_left -= accum_tc_size;
7086
7087 pf->rss_size = i40e_set_rss_size(pf, queues_left);
7088 queues_left -= pf->rss_size;
7089 if (queues_left < 0) {
7090 dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
7091 return;
7092 }
7093
7094 pf->num_lan_qps = pf->rss_size + accum_tc_size;
7095
7096 } else {
7097 dev_info(&pf->pdev->dev,
7098 "Invalid configuration, flags=0x%08llx\n", pf->flags);
7099 return;
7100 }
7101
7102 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7103 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
7104 pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
7105 pf->num_vf_qps));
7106 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
7107 }
7108
7109 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7110 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
7111 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
7112 (queues_left / pf->num_vmdq_qps));
7113 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
7114 }
7115
7116 return;
7117}
7118
7119/**
7120 * i40e_setup_pf_filter_control - Setup PF static filter control
7121 * @pf: PF to be setup
7122 *
7123 * i40e_setup_pf_filter_control sets up a pf's initial filter control
7124 * settings. If PE/FCoE are enabled then it will also set the per PF
7125 * based filter sizes required for them. It also enables Flow director,
7126 * ethertype and macvlan type filter settings for the pf.
7127 *
7128 * Returns 0 on success, negative on failure
7129 **/
7130static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
7131{
7132 struct i40e_filter_control_settings *settings = &pf->filter_settings;
7133
7134 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
7135
7136 /* Flow Director is enabled */
7137 if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
7138 settings->enable_fdir = true;
7139
7140 /* Ethtype and MACVLAN filters enabled for PF */
7141 settings->enable_ethtype = true;
7142 settings->enable_macvlan = true;
7143
7144 if (i40e_set_filter_control(&pf->hw, settings))
7145 return -ENOENT;
7146
7147 return 0;
7148}
7149
7150/**
7151 * i40e_probe - Device initialization routine
7152 * @pdev: PCI device information struct
7153 * @ent: entry in i40e_pci_tbl
7154 *
7155 * i40e_probe initializes a pf identified by a pci_dev structure.
7156 * The OS initialization, configuring of the pf private structure,
7157 * and a hardware reset occur.
7158 *
7159 * Returns 0 on success, negative on failure
7160 **/
7161static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7162{
7163 struct i40e_driver_version dv;
7164 struct i40e_pf *pf;
7165 struct i40e_hw *hw;
7166 int err = 0;
7167 u32 len;
7168
7169 err = pci_enable_device_mem(pdev);
7170 if (err)
7171 return err;
7172
7173 /* set up for high or low dma */
7174 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7175 /* coherent mask for the same size will always succeed if
7176 * dma_set_mask does
7177 */
7178 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
7179 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7180 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7181 } else {
7182 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
7183 err = -EIO;
7184 goto err_dma;
7185 }
7186
7187 /* set up pci connections */
7188 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
7189 IORESOURCE_MEM), i40e_driver_name);
7190 if (err) {
7191 dev_info(&pdev->dev,
7192 "pci_request_selected_regions failed %d\n", err);
7193 goto err_pci_reg;
7194 }
7195
7196 pci_enable_pcie_error_reporting(pdev);
7197 pci_set_master(pdev);
7198
7199 /* Now that we have a PCI connection, we need to do the
7200 * low level device setup. This is primarily setting up
7201 * the Admin Queue structures and then querying for the
7202 * device's current profile information.
7203 */
7204 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
7205 if (!pf) {
7206 err = -ENOMEM;
7207 goto err_pf_alloc;
7208 }
7209 pf->next_vsi = 0;
7210 pf->pdev = pdev;
7211 set_bit(__I40E_DOWN, &pf->state);
7212
7213 hw = &pf->hw;
7214 hw->back = pf;
7215 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
7216 pci_resource_len(pdev, 0));
7217 if (!hw->hw_addr) {
7218 err = -EIO;
7219 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
7220 (unsigned int)pci_resource_start(pdev, 0),
7221 (unsigned int)pci_resource_len(pdev, 0), err);
7222 goto err_ioremap;
7223 }
7224 hw->vendor_id = pdev->vendor;
7225 hw->device_id = pdev->device;
7226 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
7227 hw->subsystem_vendor_id = pdev->subsystem_vendor;
7228 hw->subsystem_device_id = pdev->subsystem_device;
7229 hw->bus.device = PCI_SLOT(pdev->devfn);
7230 hw->bus.func = PCI_FUNC(pdev->devfn);
7231
7232 /* do a special CORER for clearing PXE mode once at init */
7233 if (hw->revision_id == 0 &&
7234 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
7235 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
7236 i40e_flush(hw);
7237 msleep(200);
7238 pf->corer_count++;
7239
7240 i40e_clear_pxe_mode(hw);
7241 }
7242
7243 /* Reset here to make sure all is clean and to define PF 'n' */
7244 err = i40e_pf_reset(hw);
7245 if (err) {
7246 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
7247 goto err_pf_reset;
7248 }
7249 pf->pfr_count++;
7250
7251 hw->aq.num_arq_entries = I40E_AQ_LEN;
7252 hw->aq.num_asq_entries = I40E_AQ_LEN;
7253 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7254 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
7255 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
7256 snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
7257 "%s-pf%d:misc",
7258 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
7259
7260 err = i40e_init_shared_code(hw);
7261 if (err) {
7262 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
7263 goto err_pf_reset;
7264 }
7265
7266 err = i40e_init_adminq(hw);
7267 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7268 if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
7269 >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
7270 dev_info(&pdev->dev,
7271 "warning: NVM version not supported, supported version: %02x.%02x\n",
7272 I40E_CURRENT_NVM_VERSION_HI,
7273 I40E_CURRENT_NVM_VERSION_LO);
7274 }
7275 if (err) {
7276 dev_info(&pdev->dev,
7277 "init_adminq failed: %d expecting API %02x.%02x\n",
7278 err,
7279 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
7280 goto err_pf_reset;
7281 }
7282
7283 err = i40e_get_capabilities(pf);
7284 if (err)
7285 goto err_adminq_setup;
7286
7287 err = i40e_sw_init(pf);
7288 if (err) {
7289 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
7290 goto err_sw_init;
7291 }
7292
7293 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
7294 hw->func_caps.num_rx_qp,
7295 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
7296 if (err) {
7297 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
7298 goto err_init_lan_hmc;
7299 }
7300
7301 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
7302 if (err) {
7303 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
7304 err = -ENOENT;
7305 goto err_configure_lan_hmc;
7306 }
7307
7308 i40e_get_mac_addr(hw, hw->mac.addr);
7309 if (i40e_validate_mac_addr(hw->mac.addr)) {
7310 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
7311 err = -EIO;
7312 goto err_mac_addr;
7313 }
7314 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
7315 memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
7316
7317 pci_set_drvdata(pdev, pf);
7318 pci_save_state(pdev);
7319
7320 /* set up periodic task facility */
7321 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
7322 pf->service_timer_period = HZ;
7323
7324 INIT_WORK(&pf->service_task, i40e_service_task);
7325 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
7326 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
7327 pf->link_check_timeout = jiffies;
7328
7329 /* set up the main switch operations */
7330 i40e_determine_queue_usage(pf);
7331 i40e_init_interrupt_scheme(pf);
7332
7333 /* Set up the *vsi struct based on the number of VSIs in the HW,
7334 * and set up our local tracking of the MAIN PF vsi.
7335 */
7336 len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
7337 pf->vsi = kzalloc(len, GFP_KERNEL);
7338 if (!pf->vsi) {
7339 err = -ENOMEM;
 7340		goto err_switch_setup;
 7341	}
7342
7343 err = i40e_setup_pf_switch(pf);
7344 if (err) {
7345 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
7346 goto err_vsis;
7347 }
7348
7349 /* The main driver is (mostly) up and happy. We need to set this state
7350 * before setting up the misc vector or we get a race and the vector
7351 * ends up disabled forever.
7352 */
7353 clear_bit(__I40E_DOWN, &pf->state);
7354
7355 /* In case of MSIX we are going to setup the misc vector right here
7356 * to handle admin queue events etc. In case of legacy and MSI
7357 * the misc functionality and queue processing is combined in
7358 * the same vector and that gets setup at open.
7359 */
7360 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7361 err = i40e_setup_misc_vector(pf);
7362 if (err) {
7363 dev_info(&pdev->dev,
7364 "setup of misc vector failed: %d\n", err);
7365 goto err_vsis;
7366 }
7367 }
7368
7369 /* prep for VF support */
7370 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
7371 (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
7372 u32 val;
7373
7374 /* disable link interrupts for VFs */
7375 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
7376 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
7377 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
7378 i40e_flush(hw);
7379 }
7380
7381 i40e_dbg_pf_init(pf);
7382
7383 /* tell the firmware that we're starting */
7384 dv.major_version = DRV_VERSION_MAJOR;
7385 dv.minor_version = DRV_VERSION_MINOR;
7386 dv.build_version = DRV_VERSION_BUILD;
7387 dv.subbuild_version = 0;
7388 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
7389
7390 /* since everything's happy, start the service_task timer */
7391 mod_timer(&pf->service_timer,
7392 round_jiffies(jiffies + pf->service_timer_period));
7393
7394 return 0;
7395
7396 /* Unwind what we've done if something failed in the setup */
7397err_vsis:
7398 set_bit(__I40E_DOWN, &pf->state);
7399err_switch_setup:
7400 i40e_clear_interrupt_scheme(pf);
7401 kfree(pf->vsi);
7402 del_timer_sync(&pf->service_timer);
7403err_mac_addr:
7404err_configure_lan_hmc:
7405 (void)i40e_shutdown_lan_hmc(hw);
7406err_init_lan_hmc:
7407 kfree(pf->qp_pile);
7408 kfree(pf->irq_pile);
7409err_sw_init:
7410err_adminq_setup:
7411 (void)i40e_shutdown_adminq(hw);
7412err_pf_reset:
7413 iounmap(hw->hw_addr);
7414err_ioremap:
7415 kfree(pf);
7416err_pf_alloc:
7417 pci_disable_pcie_error_reporting(pdev);
7418 pci_release_selected_regions(pdev,
7419 pci_select_bars(pdev, IORESOURCE_MEM));
7420err_pci_reg:
7421err_dma:
7422 pci_disable_device(pdev);
7423 return err;
7424}
7425
7426/**
7427 * i40e_remove - Device removal routine
7428 * @pdev: PCI device information struct
7429 *
7430 * i40e_remove is called by the PCI subsystem to alert the driver
7431  * that it should release a PCI device. This could be caused by a
7432 * Hot-Plug event, or because the driver is going to be removed from
7433 * memory.
7434 **/
7435static void i40e_remove(struct pci_dev *pdev)
7436{
7437 struct i40e_pf *pf = pci_get_drvdata(pdev);
7438 i40e_status ret_code;
7439 u32 reg;
7440 int i;
7441
7442 i40e_dbg_pf_exit(pf);
7443
7444 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
7445 i40e_free_vfs(pf);
7446 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
7447 }
7448
7449 /* no more scheduling of any task */
7450 set_bit(__I40E_DOWN, &pf->state);
7451 del_timer_sync(&pf->service_timer);
7452 cancel_work_sync(&pf->service_task);
7453
7454 i40e_fdir_teardown(pf);
7455
7456 	/* If there is a switch structure or any orphans, remove them.
7457 	 * This will leave only the PF's VSI.
7458 	 */
7459 for (i = 0; i < I40E_MAX_VEB; i++) {
7460 if (!pf->veb[i])
7461 continue;
7462
7463 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
7464 pf->veb[i]->uplink_seid == 0)
7465 i40e_switch_branch_release(pf->veb[i]);
7466 }
7467
7468 	/* Now we can shut down the PF's VSI, just before we kill
7469 	 * the adminq and hmc.
7470 	 */
7471 if (pf->vsi[pf->lan_vsi])
7472 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
7473
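	/* quiesce and free the misc interrupt vector (admin queue events)
	 * before the HMC and admin queue are shut down below
	 */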
7474 i40e_stop_misc_vector(pf);
7475 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7476 synchronize_irq(pf->msix_entries[0].vector);
7477 free_irq(pf->msix_entries[0].vector, pf);
7478 }
7479
7480 /* shutdown and destroy the HMC */
7481 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
7482 if (ret_code)
7483 dev_warn(&pdev->dev,
7484 "Failed to destroy the HMC resources: %d\n", ret_code);
7485
7486 /* shutdown the adminq */
7487 i40e_aq_queue_shutdown(&pf->hw, true);
7488 ret_code = i40e_shutdown_adminq(&pf->hw);
7489 if (ret_code)
7490 dev_warn(&pdev->dev,
7491 "Failed to destroy the Admin Queue resources: %d\n",
7492 ret_code);
7493
7494 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
7495 i40e_clear_interrupt_scheme(pf);
7496 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
7497 if (pf->vsi[i]) {
7498 i40e_vsi_clear_rings(pf->vsi[i]);
7499 i40e_vsi_clear(pf->vsi[i]);
7500 pf->vsi[i] = NULL;
7501 }
7502 }
7503
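	/* free the VEB tracking structures left after the switch teardown */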
7504 for (i = 0; i < I40E_MAX_VEB; i++) {
7505 kfree(pf->veb[i]);
7506 pf->veb[i] = NULL;
7507 }
7508
7509 kfree(pf->qp_pile);
7510 kfree(pf->irq_pile);
7511 kfree(pf->sw_config);
7512 kfree(pf->vsi);
7513
7514 /* force a PF reset to clean anything leftover */
7515 reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
7516 wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
7517 i40e_flush(&pf->hw);
7518
7519 iounmap(pf->hw.hw_addr);
7520 kfree(pf);
7521 pci_release_selected_regions(pdev,
7522 pci_select_bars(pdev, IORESOURCE_MEM));
7523
7524 pci_disable_pcie_error_reporting(pdev);
7525 pci_disable_device(pdev);
7526}
7527
7528/**
7529 * i40e_pci_error_detected - warning that something funky happened in PCI land
7530 * @pdev: PCI device information struct
7531 *
7532  * Called to warn that something happened and the error handling steps
7533  * are in progress. Allows the driver to quiesce things and be ready
7534  * for remediation.
7535 **/
7536static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
7537 enum pci_channel_state error)
7538{
7539 struct i40e_pf *pf = pci_get_drvdata(pdev);
7540
7541 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
7542
7543 /* shutdown all operations */
7544 i40e_pf_quiesce_all_vsi(pf);
7545
7546 /* Request a slot reset */
7547 return PCI_ERS_RESULT_NEED_RESET;
7548}
7549
7550/**
7551 * i40e_pci_error_slot_reset - a PCI slot reset just happened
7552 * @pdev: PCI device information struct
7553 *
7554  * Called to determine whether the driver can work with the device now
7555  * that the PCI slot has been reset. If a basic connection seems good
7556  * (registers are readable and have sane content) then return a
7557  * happy little PCI_ERS_RESULT_xxx.
7558 **/
7559static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
7560{
7561 struct i40e_pf *pf = pci_get_drvdata(pdev);
7562 pci_ers_result_t result;
7563 int err;
7564 u32 reg;
7565
7566 dev_info(&pdev->dev, "%s\n", __func__);
7567 if (pci_enable_device_mem(pdev)) {
7568 dev_info(&pdev->dev,
7569 "Cannot re-enable PCI device after reset.\n");
7570 result = PCI_ERS_RESULT_DISCONNECT;
7571 } else {
7572 pci_set_master(pdev);
7573 pci_restore_state(pdev);
7574 pci_save_state(pdev);
7575 pci_wake_from_d3(pdev, false);
7576
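		/* treat any nonzero value in the global reset trigger register
		 * as a sign the device did not come back cleanly
		 */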
7577 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7578 if (reg == 0)
7579 result = PCI_ERS_RESULT_RECOVERED;
7580 else
7581 result = PCI_ERS_RESULT_DISCONNECT;
7582 }
7583
7584 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7585 if (err) {
7586 dev_info(&pdev->dev,
7587 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7588 err);
7589 /* non-fatal, continue */
7590 }
7591
7592 return result;
7593}
7594
7595/**
7596 * i40e_pci_error_resume - restart operations after PCI error recovery
7597 * @pdev: PCI device information struct
7598 *
7599 * Called to allow the driver to bring things back up after PCI error
7600 * and/or reset recovery has finished.
7601 **/
7602static void i40e_pci_error_resume(struct pci_dev *pdev)
7603{
7604 struct i40e_pf *pf = pci_get_drvdata(pdev);
7605
7606 dev_info(&pdev->dev, "%s\n", __func__);
7607 i40e_handle_reset_warning(pf);
7608}
7609
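/* PCI error recovery callbacks */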
7610static const struct pci_error_handlers i40e_err_handler = {
7611 .error_detected = i40e_pci_error_detected,
7612 .slot_reset = i40e_pci_error_slot_reset,
7613 .resume = i40e_pci_error_resume,
7614};
7615
7616static struct pci_driver i40e_driver = {
7617 .name = i40e_driver_name,
7618 .id_table = i40e_pci_tbl,
7619 .probe = i40e_probe,
7620 .remove = i40e_remove,
7621 .err_handler = &i40e_err_handler,
7622 .sriov_configure = i40e_pci_sriov_configure,
7623};
7624
7625/**
7626 * i40e_init_module - Driver registration routine
7627 *
7628 * i40e_init_module is the first routine called when the driver is
7629 * loaded. All it does is register with the PCI subsystem.
7630 **/
7631static int __init i40e_init_module(void)
7632{
7633 pr_info("%s: %s - version %s\n", i40e_driver_name,
7634 i40e_driver_string, i40e_driver_version_str);
7635 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
7636 i40e_dbg_init();
7637 return pci_register_driver(&i40e_driver);
7638}
7639module_init(i40e_init_module);
7640
7641/**
7642 * i40e_exit_module - Driver exit cleanup routine
7643 *
7644 * i40e_exit_module is called just before the driver is removed
7645 * from memory.
7646 **/
7647static void __exit i40e_exit_module(void)
7648{
7649 pci_unregister_driver(&i40e_driver);
7650 i40e_dbg_exit();
7651}
7652module_exit(i40e_exit_module);