// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>

/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
                        "Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
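
/* With DRV_KERN defined as "-k" above, DRV_VERSION stringifies to
 * "2.8.10-k", the version string reported through i40e_driver_version_str
 * (ethtool -i) and MODULE_VERSION below.
 */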

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
                                 enum i40e_admin_queue_opc list_type);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
                            u64 size, u32 alignment)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;

        return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
        mem->va = NULL;
        mem->pa = 0;
        mem->size = 0;

        return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
                             u32 size)
{
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (!mem->va)
                return -ENOMEM;

        return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);
        mem->va = NULL;
        mem->size = 0;

        return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
{
        int ret = -ENOMEM;
        int i, j;

        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
                         "param err: pile=%s needed=%d id=0x%04x\n",
                         pile ? "<valid>" : "<null>", needed, id);
                return -EINVAL;
        }

        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
        while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
                        continue;
                }

                /* do we have enough in this lump? */
                for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
                        if (pile->list[i+j] & I40E_PILE_VALID_BIT)
                                break;
                }

                if (j == needed) {
                        /* there was enough, so assign it to the requestor */
                        for (j = 0; j < needed; j++)
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
                        break;
                }

                /* not enough, so skip over it and continue looking */
                i += j;
        }

        return ret;
}
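
/* For illustration: on an empty pile with num_entries = 8, a call
 * i40e_get_lump(pf, pile, 3, id) returns base index 0 and marks
 * list[0..2] = id | I40E_PILE_VALID_BIT; search_hint then points at 3,
 * so the next same-size request starts its scan there.
 */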

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
        int valid_id = (id | I40E_PILE_VALID_BIT);
        int count = 0;
        int i;

        if (!pile || index >= pile->num_entries)
                return -EINVAL;

        for (i = index;
             i < pile->num_entries && pile->list[i] == valid_id;
             i++) {
                pile->list[i] = 0;
                count++;
        }

        if (count && index < pile->search_hint)
                pile->search_hint = index;

        return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
        int i;

        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i] && (pf->vsi[i]->id == id))
                        return pf->vsi[i];

        return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
        if (!test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_ring *tx_ring = NULL;
        unsigned int i, hung_queue = 0;
        u32 head, val;

        pf->tx_timeout_count++;

        /* find the stopped queue the same way the stack does */
        for (i = 0; i < netdev->num_tx_queues; i++) {
                struct netdev_queue *q;
                unsigned long trans_start;

                q = netdev_get_tx_queue(netdev, i);
                trans_start = q->trans_start;
                if (netif_xmit_stopped(q) &&
                    time_after(jiffies,
                               (trans_start + netdev->watchdog_timeo))) {
                        hung_queue = i;
                        break;
                }
        }

        if (i == netdev->num_tx_queues) {
                netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
        } else {
                /* now that we have an index, find the tx_ring struct */
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
                                if (hung_queue ==
                                    vsi->tx_rings[i]->queue_index) {
                                        tx_ring = vsi->tx_rings[i];
                                        break;
                                }
                        }
                }
        }

        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
                pf->tx_timeout_recovery_level = 1; /* reset after some time */
        else if (time_before(jiffies,
                 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
                return; /* don't do any new action before the next timeout */

        /* don't kick off another recovery if one is already pending */
        if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
                return;

        if (tx_ring) {
                head = i40e_get_head(tx_ring);
                /* Read interrupt register */
                if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        val = rd32(&pf->hw,
                             I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
                                                 tx_ring->vsi->base_vector - 1));
                else
                        val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

                netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
                            vsi->seid, hung_queue, tx_ring->next_to_clean,
                            head, tx_ring->next_to_use,
                            readl(tx_ring->tail), val);
        }

        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
                    pf->tx_timeout_recovery_level, hung_queue);

        switch (pf->tx_timeout_recovery_level) {
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
                break;
        case 2:
                set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
                break;
        case 3:
                set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
                break;
        }

        i40e_service_event_schedule(pf);
        pf->tx_timeout_recovery_level++;
}
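
/* Recovery escalates one step per timeout: level 1 requests a PF
 * reset, level 2 a CORE reset, level 3 a GLOBAL reset; anything beyond
 * that only logs that recovery was unsuccessful. A quiet period of
 * HZ*20 jiffies resets the ladder back to level 1.
 */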

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
        return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
                                            struct rtnl_link_stats64 *stats)
{
        u64 bytes, packets;
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&ring->syncp);
                packets = ring->stats.packets;
                bytes   = ring->stats.bytes;
        } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

        stats->tx_packets += packets;
        stats->tx_bytes   += bytes;
}
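
/* The u64_stats seqcount loop above re-reads packets/bytes if the
 * writer (the Tx clean path) updated the counters mid-read, so a torn
 * 64-bit read is never reported; on 64-bit builds the begin/retry
 * helpers compile away and the loop body runs exactly once.
 */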

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Fills out the passed-in @stats structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                                         struct rtnl_link_stats64 *stats)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
        struct i40e_ring *ring;
        int i;

        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return;

        if (!vsi->tx_rings)
                return;

        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                u64 bytes, packets;
                unsigned int start;

                ring = READ_ONCE(vsi->tx_rings[i]);
                if (!ring)
                        continue;
                i40e_get_netdev_stats_struct_tx(ring, stats);

                if (i40e_enabled_xdp_vsi(vsi)) {
                        ring++;
                        i40e_get_netdev_stats_struct_tx(ring, stats);
                }

                ring++;
                do {
                        start   = u64_stats_fetch_begin_irq(&ring->syncp);
                        packets = ring->stats.packets;
                        bytes   = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

                stats->rx_packets += packets;
                stats->rx_bytes   += bytes;
        }
        rcu_read_unlock();

        /* following stats updated by i40e_watchdog_subtask() */
        stats->multicast        = vsi_stats->multicast;
        stats->tx_errors        = vsi_stats->tx_errors;
        stats->tx_dropped       = vsi_stats->tx_dropped;
        stats->rx_errors        = vsi_stats->rx_errors;
        stats->rx_dropped       = vsi_stats->rx_dropped;
        stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
        struct rtnl_link_stats64 *ns;
        int i;

        if (!vsi)
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        memset(ns, 0, sizeof(*ns));
        memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
        memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings && vsi->rx_rings[0]) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
                        memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
                        memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
                }
        }
        vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
        int i;

        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;

        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (pf->veb[i]) {
                        memset(&pf->veb[i]->stats, 0,
                               sizeof(pf->veb[i]->stats));
                        memset(&pf->veb[i]->stats_offsets, 0,
                               sizeof(pf->veb[i]->stats_offsets));
                        pf->veb[i]->stat_offsets_loaded = false;
                }
        }
        pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u64 new_data;

        if (hw->device_id == I40E_DEV_ID_QEMU) {
                new_data = rd32(hw, loreg);
                new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
        } else {
                new_data = rd64(hw, loreg);
        }
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
                *stat = (new_data + BIT_ULL(48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
}
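
/* Roll-over example: if the saved offset is 0xFFFFFFFFFF00 and the
 * 48-bit counter wraps to new_data = 0xF, the stat becomes
 * (0xF + 2^48) - 0xFFFFFFFFFF00 = 0x10F before the final mask back to
 * 48 bits, so the reported count stays monotonic across wraps.
 */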

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u32 new_data;

        new_data = rd32(hw, reg);
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
                *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
        u32 new_data = rd32(hw, reg);

        wr32(hw, reg, 1); /* must write a nonzero value to clear register */
        *stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
        int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es; /* device's eth stats */

        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);
        i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_discards, &es->rx_discards);
        i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

        i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
                           I40E_GLV_GORCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
                           I40E_GLV_UPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
                           I40E_GLV_MPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
                           I40E_GLV_BPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
                           I40E_GLV_GOTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
                           I40E_GLV_UPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
                           I40E_GLV_MPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
        struct i40e_pf *pf = veb->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es; /* device's eth stats */
        struct i40e_veb_tc_stats *veb_oes;
        struct i40e_veb_tc_stats *veb_es;
        int i, idx = 0;

        idx = veb->stats_idx;
        es = &veb->stats;
        oes = &veb->stats_offsets;
        veb_es = &veb->tc_stats;
        veb_oes = &veb->tc_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_discards, &es->tx_discards);
        if (hw->revision_id > 0)
                i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
                                   veb->stat_offsets_loaded,
                                   &oes->rx_unknown_protocol,
                                   &es->rx_unknown_protocol);
        i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
                                   I40E_GLVEBTC_RPCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_rx_packets[i],
                                   &veb_es->tc_rx_packets[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
                                   I40E_GLVEBTC_RBCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_rx_bytes[i],
                                   &veb_es->tc_rx_bytes[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
                                   I40E_GLVEBTC_TPCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_tx_packets[i],
                                   &veb_es->tc_tx_packets[i]);
                i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
                                   I40E_GLVEBTC_TBCL(i, idx),
                                   veb->stat_offsets_loaded,
                                   &veb_oes->tc_tx_bytes[i],
                                   &veb_es->tc_tx_bytes[i]);
        }
        veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns; /* netdev stats */
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es; /* device's eth stats */
        u32 tx_restart, tx_busy;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
        unsigned int start;
        u64 tx_linearize;
        u64 tx_force_wb;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;

        if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        ons = &vsi->net_stats_offsets;
        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the netdev and vsi stats that the driver collects
         * on the fly during packet processing
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = READ_ONCE(vsi->tx_rings[q]);

                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                tx_b += bytes;
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;

                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                rx_b += bytes;
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
        }
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
        vsi->tx_force_wb = tx_force_wb;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;

        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
        ns->tx_packets = tx_p;
        ns->tx_bytes = tx_b;

        /* update netdev stats from eth stats */
        i40e_update_eth_stats(vsi);
        ons->tx_errors = oes->tx_errors;
        ns->tx_errors = es->tx_errors;
        ons->multicast = oes->rx_multicast;
        ns->multicast = es->rx_multicast;
        ons->rx_dropped = oes->rx_discards;
        ns->rx_dropped = es->rx_discards;
        ons->tx_dropped = oes->tx_discards;
        ns->tx_dropped = es->tx_discards;

        /* pull in a couple PF stats if this is the main vsi */
        if (vsi == pf->vsi[pf->lan_vsi]) {
                ns->rx_crc_errors = pf->stats.crc_errors;
                ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
                ns->rx_length_errors = pf->stats.rx_length_errors;
        }
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
        int i;

        i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
                           I40E_GLPRT_GORCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
        i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
                           I40E_GLPRT_GOTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
        i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_discards,
                           &nsd->eth.rx_discards);
        i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
                           I40E_GLPRT_UPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_unicast,
                           &nsd->eth.rx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
                           I40E_GLPRT_MPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_multicast,
                           &nsd->eth.rx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
                           I40E_GLPRT_BPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_broadcast,
                           &nsd->eth.rx_broadcast);
        i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
                           I40E_GLPRT_UPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_unicast,
                           &nsd->eth.tx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
                           I40E_GLPRT_MPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_multicast,
                           &nsd->eth.tx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
                           I40E_GLPRT_BPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_broadcast,
                           &nsd->eth.tx_broadcast);

        i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_dropped_link_down,
                           &nsd->tx_dropped_link_down);

        i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->crc_errors, &nsd->crc_errors);

        i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->illegal_bytes, &nsd->illegal_bytes);

        i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_local_faults,
                           &nsd->mac_local_faults);
        i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_remote_faults,
                           &nsd->mac_remote_faults);

        i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_length_errors,
                           &nsd->rx_length_errors);

        i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_rx, &nsd->link_xon_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_rx, &nsd->link_xoff_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);

        for (i = 0; i < 8; i++) {
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
                                   &nsd->priority_xoff_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_rx[i],
                                   &nsd->priority_xon_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_tx[i],
                                   &nsd->priority_xon_tx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_tx[i],
                                   &nsd->priority_xoff_tx[i]);
                i40e_stat_update32(hw,
                                   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_2_xoff[i],
                                   &nsd->priority_xon_2_xoff[i]);
        }

        i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
                           I40E_GLPRT_PRC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_64, &nsd->rx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
                           I40E_GLPRT_PRC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_127, &nsd->rx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
                           I40E_GLPRT_PRC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_255, &nsd->rx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
                           I40E_GLPRT_PRC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_511, &nsd->rx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
                           I40E_GLPRT_PRC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1023, &nsd->rx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
                           I40E_GLPRT_PRC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1522, &nsd->rx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
                           I40E_GLPRT_PRC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_big, &nsd->rx_size_big);

        i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
                           I40E_GLPRT_PTC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_64, &nsd->tx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
                           I40E_GLPRT_PTC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_127, &nsd->tx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
                           I40E_GLPRT_PTC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_255, &nsd->tx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
                           I40E_GLPRT_PTC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_511, &nsd->tx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
                           I40E_GLPRT_PTC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1023, &nsd->tx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
                           I40E_GLPRT_PTC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1522, &nsd->tx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
                           I40E_GLPRT_PTC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_big, &nsd->tx_size_big);

        i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_undersize, &nsd->rx_undersize);
        i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_fragments, &nsd->rx_fragments);
        i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_oversize, &nsd->rx_oversize);
        i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_jabber, &nsd->rx_jabber);

        /* FDIR stats */
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
                        &nsd->fd_atr_match);
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
                        &nsd->fd_sb_match);
        i40e_stat_update_and_clear32(hw,
                        I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
                        &nsd->fd_atr_tunnel_match);

        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
        nsd->rx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
        i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
                           pf->stat_offsets_loaded,
                           &osd->tx_lpi_count, &nsd->tx_lpi_count);
        i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);

        if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
            !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
                nsd->fd_sb_status = true;
        else
                nsd->fd_sb_status = false;

        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
            !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
                nsd->fd_atr_status = true;
        else
                nsd->fd_atr_status = false;

        pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        if (vsi == pf->vsi[pf->lan_vsi])
                i40e_update_pf_stats(pf);

        i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
                                                const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        key = i40e_addr_to_hkey(macaddr);
        hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (vlan == f->vlan))
                        return f;
        }
        return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        key = i40e_addr_to_hkey(macaddr);
        hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
                if ((ether_addr_equal(macaddr, f->macaddr)))
                        return f;
        }
        return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
        /* If we have a PVID, always operate in VLAN mode */
        if (vsi->info.pvid)
                return true;

        /* We need to operate in VLAN mode whenever we have any filters with
         * a VLAN other than I40E_VLAN_ANY. We could check the table each
         * time, incurring search cost repeatedly. However, we can notice two
         * things:
         *
         * 1) the only place where we can gain a VLAN filter is in
         *    i40e_add_filter.
         *
         * 2) the only place where filters are actually removed is in
         *    i40e_sync_filters_subtask.
         *
         * Thus, we can simply use a boolean value, has_vlan_filters which we
         * will set to true when we add a VLAN filter in i40e_add_filter. Then
         * we have to perform the full search after deleting filters in
         * i40e_sync_filters_subtask, but we already have to search
         * filters here and can perform the check at the same time. This
         * results in avoiding embedding a loop for VLAN mode inside another
         * loop over all the filters, and should maintain correctness as noted
         * above.
         */
        return vsi->has_vlan_filter;
}
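
/* Example of the invariant: adding a filter with vlan == 5 sets
 * has_vlan_filter in i40e_add_filter(); once the last VLAN filter is
 * deleted and the sync task runs, i40e_correct_mac_vlan_filters()
 * below clears it again via vsi->has_vlan_filter = !!vlan_filters.
 */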

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
                                         struct hlist_head *tmp_add_list,
                                         struct hlist_head *tmp_del_list,
                                         int vlan_filters)
{
        s16 pvid = le16_to_cpu(vsi->info.pvid);
        struct i40e_mac_filter *f, *add_head;
        struct i40e_new_mac_filter *new;
        struct hlist_node *h;
        int bkt, new_vlan;

        /* To determine if a particular filter needs to be replaced we
         * have the three following conditions:
         *
         * a) if we have a PVID assigned, then all filters which are
         *    not marked as VLAN=PVID must be replaced with filters that
         *    are.
         * b) otherwise, if we have any active VLANS, all filters
         *    which are marked as VLAN=-1 must be replaced with
         *    filters marked as VLAN=0
         * c) finally, if we do not have any active VLANS, all filters
         *    which are marked as VLAN=0 must be replaced with filters
         *    marked as VLAN=-1
         */

        /* Update the filters about to be added in place */
        hlist_for_each_entry(new, tmp_add_list, hlist) {
                if (pvid && new->f->vlan != pvid)
                        new->f->vlan = pvid;
                else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
                        new->f->vlan = 0;
                else if (!vlan_filters && new->f->vlan == 0)
                        new->f->vlan = I40E_VLAN_ANY;
        }

        /* Update the remaining active filters */
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                /* Combine the checks for whether a filter needs to be changed
                 * and then determine the new VLAN inside the if block, in
                 * order to avoid duplicating code for adding the new filter
                 * then deleting the old filter.
                 */
                if ((pvid && f->vlan != pvid) ||
                    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
                    (!vlan_filters && f->vlan == 0)) {
                        /* Determine the new vlan we will be adding */
                        if (pvid)
                                new_vlan = pvid;
                        else if (vlan_filters)
                                new_vlan = 0;
                        else
                                new_vlan = I40E_VLAN_ANY;

                        /* Create the new filter */
                        add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
                        if (!add_head)
                                return -ENOMEM;

                        /* Create a temporary i40e_new_mac_filter */
                        new = kzalloc(sizeof(*new), GFP_ATOMIC);
                        if (!new)
                                return -ENOMEM;

                        new->f = add_head;
                        new->state = add_head->state;

                        /* Add the new filter to the tmp list */
                        hlist_add_head(&new->hlist, tmp_add_list);

                        /* Put the original filter into the delete list */
                        f->state = I40E_FILTER_REMOVE;
                        hash_del(&f->hlist);
                        hlist_add_head(&f->hlist, tmp_del_list);
                }
        }

        vsi->has_vlan_filter = !!vlan_filters;

        return 0;
}
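
/* Summary of the correction rules applied above:
 *   PVID assigned          -> every filter forced to VLAN=PVID
 *   active VLAN filters    -> VLAN=-1 (any) filters demoted to VLAN=0
 *   no active VLAN filters -> VLAN=0 filters promoted to VLAN=-1
 */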

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;

        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
                return;

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        /* Ignore error returns, some firmware does it this way... */
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
        i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        /* ...and some firmware does it this way. */
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
        i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;
        u64 key;

        if (!vsi || !macaddr)
                return NULL;

        f = i40e_find_filter(vsi, macaddr, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return NULL;

                /* Update the boolean indicating if we need to function in
                 * VLAN mode.
                 */
                if (vlan >= 0)
                        vsi->has_vlan_filter = true;

                ether_addr_copy(f->macaddr, macaddr);
                f->vlan = vlan;
                f->state = I40E_FILTER_NEW;
                INIT_HLIST_NODE(&f->hlist);

                key = i40e_addr_to_hkey(macaddr);
                hash_add(vsi->mac_filter_hash, &f->hlist, key);

                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
        }

        /* If we're asked to add a filter that has been marked for removal, it
         * is safe to simply restore it to active state. __i40e_del_filter
         * will have simply deleted any filters which were previously marked
         * NEW or FAILED, so if it is currently marked REMOVE it must have
         * previously been ACTIVE. Since we haven't yet run the sync filters
         * task, just restore this filter to the ACTIVE state so that the
         * sync task leaves it in place
         */
        if (f->state == I40E_FILTER_REMOVE)
                f->state = I40E_FILTER_ACTIVE;

        return f;
}
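
/* Filter life cycle as used above and in __i40e_del_filter():
 *   NEW    -> ACTIVE  (sync task pushed the filter to firmware)
 *   NEW    -> FAILED  (firmware rejected it, e.g. out of resources)
 *   ACTIVE -> REMOVE  (marked for deletion; sync task finishes it)
 * NEW and FAILED filters are freed directly on delete and are never
 * sent to firmware as remove requests.
 */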

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
        if (!f)
                return;

        /* If the filter was never added to firmware then we can just delete it
         * directly and we don't want to set the status to remove or else an
         * admin queue command will unnecessarily fire.
         */
        if ((f->state == I40E_FILTER_FAILED) ||
            (f->state == I40E_FILTER_NEW)) {
                hash_del(&f->hlist);
                kfree(f);
        } else {
                f->state = I40E_FILTER_REMOVE;
        }

        vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
        set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return;

        f = i40e_find_filter(vsi, macaddr, vlan);
        __i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
                                            const u8 *macaddr)
{
        struct i40e_mac_filter *f, *add = NULL;
        struct hlist_node *h;
        int bkt;

        if (vsi->info.pvid)
                return i40e_add_filter(vsi, macaddr,
                                       le16_to_cpu(vsi->info.pvid));

        if (!i40e_is_vsi_in_vlan(vsi))
                return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (f->state == I40E_FILTER_REMOVE)
                        continue;
                add = i40e_add_filter(vsi, macaddr, f->vlan);
                if (!add)
                        return NULL;
        }

        return add;
}

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
        struct i40e_mac_filter *f;
        struct hlist_node *h;
        bool found = false;
        int bkt;

        lockdep_assert_held(&vsi->mac_filter_hash_lock);
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (ether_addr_equal(macaddr, f->macaddr)) {
                        __i40e_del_filter(vsi, f);
                        found = true;
                }
        }

        if (found)
                return 0;
        else
                return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
                netdev_info(netdev, "already using mac address %pM\n",
                            addr->sa_data);
                return 0;
        }

        if (test_bit(__I40E_DOWN, pf->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(hw->mac.addr, addr->sa_data))
                netdev_info(netdev, "returning to hw mac address %pM\n",
                            hw->mac.addr);
        else
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

        /* Copy the address first, so that we avoid a possible race with
         * .set_rx_mode().
         * - Remove old address from MAC filter
         * - Copy new address
         * - Add new address to MAC filter
         */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_del_mac_filter(vsi, netdev->dev_addr);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        i40e_add_mac_filter(vsi, netdev->dev_addr);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);

        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;

                ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                if (ret)
                        netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
                                    i40e_stat_str(hw, ret),
                                    i40e_aq_str(hw, hw->aq.asq_last_status));
        }

        /* schedule our worker thread which will take care of
         * applying the new filter changes
         */
        i40e_service_event_schedule(pf);
        return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
                              u8 *lut, u16 lut_size)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int ret = 0;

        if (seed) {
                struct i40e_aqc_get_set_rss_key_data *seed_dw =
                        (struct i40e_aqc_get_set_rss_key_data *)seed;
                ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Cannot set RSS key, err %s aq_err %s\n",
                                 i40e_stat_str(hw, ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                        return ret;
                }
        }
        if (lut) {
                bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

                ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Cannot set RSS lut, err %s aq_err %s\n",
                                 i40e_stat_str(hw, ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                        return ret;
                }
        }
        return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        u8 seed[I40E_HKEY_ARRAY_SIZE];
        u8 *lut;
        int ret;

        if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
                return 0;
        if (!vsi->rss_size)
                vsi->rss_size = min_t(int, pf->alloc_rss_size,
                                      vsi->num_queue_pairs);
        if (!vsi->rss_size)
                return -EINVAL;
        lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
        if (!lut)
                return -ENOMEM;

        /* Use the user configured hash keys and lookup table if there is one,
         * otherwise use default
         */
        if (vsi->rss_lut_user)
                memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
        else
                i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
        if (vsi->rss_hkey_user)
                memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
        else
                netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
        ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
        kfree(lut);
        return ret;
}

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
                                           struct i40e_vsi_context *ctxt,
                                           u8 enabled_tc)
{
        u16 qcount = 0, max_qcount, qmap, sections = 0;
        int i, override_q, pow, num_qps, ret;
        u8 netdev_tc = 0, offset = 0;

        if (vsi->type != I40E_VSI_MAIN)
                return -EINVAL;
        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
        vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        num_qps = vsi->mqprio_qopt.qopt.count[0];

        /* find the next higher power-of-2 of num queue pairs */
        pow = ilog2(num_qps);
        if (!is_power_of_2(num_qps))
                pow++;
        qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

        /* Setup queue offset/count for all TCs for given VSI */
        max_qcount = vsi->mqprio_qopt.qopt.count[0];
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & BIT(i)) {
                        offset = vsi->mqprio_qopt.qopt.offset[i];
                        qcount = vsi->mqprio_qopt.qopt.count[i];
                        if (qcount > max_qcount)
                                max_qcount = qcount;
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;
                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;
                }
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset + qcount;

        /* Setup queue TC[0].qmap for given VSI context */
        ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
        ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
        ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        ctxt->info.valid_sections |= cpu_to_le16(sections);

        /* Reconfigure RSS for main VSI with max queue count */
        vsi->rss_size = max_qcount;
        ret = i40e_vsi_config_rss(vsi);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "Failed to reconfig rss for num_queues (%u)\n",
                         max_qcount);
                return ret;
        }
        vsi->reconfig_rss = true;
        dev_dbg(&vsi->back->pdev->dev,
                "Reconfigured rss with num_queues (%u)\n", max_qcount);

        /* Find queue count available for channel VSIs and starting offset
         * for channel VSIs
         */
        override_q = vsi->mqprio_qopt.qopt.count[0];
        if (override_q && override_q < vsi->num_queue_pairs) {
                vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
                vsi->next_base_queue = override_q;
        }
        return 0;
}
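
/* qmap encoding example: with count[0] = 6 queues for TC0 at offset 0,
 * ilog2(6) = 2 and 6 is not a power of two, so pow becomes 3; the
 * queue-number field of qmap stores that exponent, i.e. a rounded-up
 * range of 2^3 = 8 queues, since the hardware queue map works in
 * power-of-two sizes.
 */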

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
{
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
        u8 netdev_tc = 0;
        u16 numtc = 1;
        u16 qcount;
        u8 offset;
        u16 qmap;
        int i;
        u16 num_tc_qps = 0;

        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        offset = 0;

        /* Number of queues per enabled TC */
        num_tc_qps = vsi->alloc_queue_pairs;
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if (enabled_tc & BIT(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
                        dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
                        numtc = 1;
                }
                num_tc_qps = num_tc_qps / numtc;
                num_tc_qps = min_t(int, num_tc_qps,
                                   i40e_pf_get_max_q_per_tc(pf));
        }

        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

        /* Do not allow use more TC queue pairs than MSI-X vectors exist */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & BIT(i)) {
                        /* TC is enabled */
                        int pow, num_qps;

                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
                                if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
                                    I40E_FLAG_FD_ATR_ENABLED)) ||
                                    vsi->tc_config.enabled_tc != 1) {
                                        qcount = min_t(int, pf->alloc_rss_size,
                                                       num_tc_qps);
                                        break;
                                }
                                /* fall through */
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
                        default:
                                qcount = num_tc_qps;
                                WARN_ON(i != 0);
                                break;
                        }
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;

                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
                        while (num_qps && (BIT_ULL(pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }

                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                        qmap =
                            (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                            (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

                        offset += qcount;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;

                        qmap = 0;
                }
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;
        if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        vsi->num_queue_pairs = pf->num_lan_msix;
        }

        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
                sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

                ctxt->info.up_enable_bits = enabled_tc;
        }
        if (vsi->type == I40E_VSI_SRIOV) {
                ctxt->info.mapping_flags |=
                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
                for (i = 0; i < vsi->num_queue_pairs; i++)
                        ctxt->info.queue_mapping[i] =
                                cpu_to_le16(vsi->base_queue + i);
        } else {
                ctxt->info.mapping_flags |=
                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        }
        ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        if (i40e_add_mac_filter(vsi, addr))
                return 0;
        else
                return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        i40e_del_mac_filter(vsi, addr);

        return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        spin_lock_bh(&vsi->mac_filter_hash_lock);

        __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
        __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

        spin_unlock_bh(&vsi->mac_filter_hash_lock);

        /* check for other flag changes */
        if (vsi->current_netdev_flags != vsi->netdev->flags) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
        }
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
                                         struct hlist_head *from)
{
        struct i40e_mac_filter *f;
        struct hlist_node *h;

        hlist_for_each_entry_safe(f, h, from, hlist) {
                u64 key = i40e_addr_to_hkey(f->macaddr);

                /* Move the element back into MAC filter list */
                hlist_del(&f->hlist);
                hash_add(vsi->mac_filter_hash, &f->hlist, key);
        }
}
1970 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1971 * @vsi: Pointer to VSI struct
1972 * @from: Pointer to list which contains MAC filter entries - changes to
1973 * those entries need to be undone.
1975 * MAC filter entries from this list were slated for addition.
1977 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1978 struct hlist_head *from)
1980 struct i40e_new_mac_filter *new;
1981 struct hlist_node *h;
1983 hlist_for_each_entry_safe(new, h, from, hlist) {
1984 /* We can simply free the wrapper structure */
1985 hlist_del(&new->hlist);
1991 * i40e_next_filter - Get the next non-broadcast filter from a list
1992 * @next: pointer to filter in list
1994 * Returns the next non-broadcast filter in the list. Required so that we
1995 * ignore broadcast filters within the list, since these are not handled via
1996 * the normal firmware update path.
1999 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2001 hlist_for_each_entry_continue(next, hlist) {
2002 if (!is_broadcast_ether_addr(next->f->macaddr))
2010 * i40e_update_filter_state - Update filter state based on return data
2012 * @count: Number of filters added
2013 * @add_list: return data from fw
2014 * @add_head: pointer to first filter in current batch
2016 * MAC filter entries from the list were slated to be added to the device.
2017 * Returns the number of successful filters. Note that 0 does NOT mean success!
2020 i40e_update_filter_state(int count,
2021 struct i40e_aqc_add_macvlan_element_data *add_list,
2022 struct i40e_new_mac_filter *add_head)
2027 for (i = 0; i < count; i++) {
2028 /* Always check status of each filter. We don't need to check
2029 * the firmware return status because we pre-set the filter
2030 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2031 * request to the adminq. Thus, if it no longer matches then
2032 * we know the filter is active.
2034 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2035 add_head->state = I40E_FILTER_FAILED;
2037 add_head->state = I40E_FILTER_ACTIVE;
2041 add_head = i40e_next_filter(add_head);
2050 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2051 * @vsi: ptr to the VSI
2052 * @vsi_name: name to display in messages
2053 * @list: the list of filters to send to firmware
2054 * @num_del: the number of filters to delete
2055 * @retval: Set to -EIO on failure to delete
2057 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2058 * *retval instead of a return value so that success does not force *retval to
2059 * be set to 0. This ensures that a sequence of calls to this function
2060 * preserves the previous value of *retval on a successful delete.
2063 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2064 struct i40e_aqc_remove_macvlan_element_data *list,
2065 int num_del, int *retval)
2067 struct i40e_hw *hw = &vsi->back->hw;
2071 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2072 aq_err = hw->aq.asq_last_status;
2074 /* Explicitly ignore and do not report when firmware returns ENOENT */
2075 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2077 dev_info(&vsi->back->pdev->dev,
2078 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2079 vsi_name, i40e_stat_str(hw, aq_ret),
2080 i40e_aq_str(hw, aq_err));
2085 * i40e_aqc_add_filters - Request firmware to add a set of filters
2086 * @vsi: ptr to the VSI
2087 * @vsi_name: name to display in messages
2088 * @list: the list of filters to send to firmware
2089 * @add_head: Position in the add hlist
2090 * @num_add: the number of filters to add
2092 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2093 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2094 * space for more filters.
2097 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2098 struct i40e_aqc_add_macvlan_element_data *list,
2099 struct i40e_new_mac_filter *add_head,
2102 struct i40e_hw *hw = &vsi->back->hw;
2105 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2106 aq_err = hw->aq.asq_last_status;
2107 fcnt = i40e_update_filter_state(num_add, list, add_head);
2109 if (fcnt != num_add) {
2110 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2111 dev_warn(&vsi->back->pdev->dev,
2112 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2113 i40e_aq_str(hw, aq_err),
2119 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2120 * @vsi: pointer to the VSI
2121 * @vsi_name: the VSI name
2122 * @f: filter data
2124 * This function sets or clears the promiscuous broadcast flags for VLAN
2125 * filters in order to properly receive broadcast frames. Assumes that only
2126 * broadcast filters are passed.
2128 * Returns status indicating success or failure.
2131 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2132 struct i40e_mac_filter *f)
2134 bool enable = f->state == I40E_FILTER_NEW;
2135 struct i40e_hw *hw = &vsi->back->hw;
2138 if (f->vlan == I40E_VLAN_ANY) {
2139 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2144 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2152 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2153 dev_warn(&vsi->back->pdev->dev,
2154 "Error %s, forcing overflow promiscuous on %s\n",
2155 i40e_aq_str(hw, hw->aq.asq_last_status),
2163 * i40e_set_promiscuous - set promiscuous mode
2164 * @pf: board private structure
2165 * @promisc: promisc on or off
2167 * There are different ways of setting promiscuous mode on a PF depending on
2168 * what state/environment we're in. This identifies and sets it appropriately.
2169 * Returns 0 on success.
2171 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2173 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2174 struct i40e_hw *hw = &pf->hw;
2177 if (vsi->type == I40E_VSI_MAIN &&
2178 pf->lan_veb != I40E_NO_VEB &&
2179 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2180 /* set defport ON for Main VSI instead of true promisc
2181 * this way we will get all unicast/multicast and VLAN
2182 * promisc behavior but will not get VF or VMDq traffic
2183 * replicated on the Main VSI.
2186 aq_ret = i40e_aq_set_default_vsi(hw,
2190 aq_ret = i40e_aq_clear_default_vsi(hw,
2194 dev_info(&pf->pdev->dev,
2195 "Set default VSI failed, err %s, aq_err %s\n",
2196 i40e_stat_str(hw, aq_ret),
2197 i40e_aq_str(hw, hw->aq.asq_last_status));
2200 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2206 dev_info(&pf->pdev->dev,
2207 "set unicast promisc failed, err %s, aq_err %s\n",
2208 i40e_stat_str(hw, aq_ret),
2209 i40e_aq_str(hw, hw->aq.asq_last_status));
2211 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2216 dev_info(&pf->pdev->dev,
2217 "set multicast promisc failed, err %s, aq_err %s\n",
2218 i40e_stat_str(hw, aq_ret),
2219 i40e_aq_str(hw, hw->aq.asq_last_status));
2224 pf->cur_promisc = promisc;
2230 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2231 * @vsi: ptr to the VSI
2233 * Push any outstanding VSI filter changes through the AdminQ.
2235 * Returns 0 or error value
2237 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2239 struct hlist_head tmp_add_list, tmp_del_list;
2240 struct i40e_mac_filter *f;
2241 struct i40e_new_mac_filter *new, *add_head = NULL;
2242 struct i40e_hw *hw = &vsi->back->hw;
2243 bool old_overflow, new_overflow;
2244 unsigned int failed_filters = 0;
2245 unsigned int vlan_filters = 0;
2246 char vsi_name[16] = "PF";
2247 int filter_list_len = 0;
2248 i40e_status aq_ret = 0;
2249 u32 changed_flags = 0;
2250 struct hlist_node *h;
2259 /* element arrays for the AdminQ commands, allocated with kzalloc below */
2260 struct i40e_aqc_add_macvlan_element_data *add_list;
2261 struct i40e_aqc_remove_macvlan_element_data *del_list;
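/* Take ownership of the sync flag; busy-waiting here serializes
 * concurrent callers trying to push filter changes for this VSI.
 */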
2263 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2264 usleep_range(1000, 2000);
2267 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2270 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2271 vsi->current_netdev_flags = vsi->netdev->flags;
2274 INIT_HLIST_HEAD(&tmp_add_list);
2275 INIT_HLIST_HEAD(&tmp_del_list);
2277 if (vsi->type == I40E_VSI_SRIOV)
2278 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2279 else if (vsi->type != I40E_VSI_MAIN)
2280 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2282 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2283 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2285 spin_lock_bh(&vsi->mac_filter_hash_lock);
2286 /* Create a list of filters to delete. */
2287 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2288 if (f->state == I40E_FILTER_REMOVE) {
2289 /* Move the element into temporary del_list */
2290 hash_del(&f->hlist);
2291 hlist_add_head(&f->hlist, &tmp_del_list);
2293 /* Avoid counting removed filters */
2296 if (f->state == I40E_FILTER_NEW) {
2297 /* Create a temporary i40e_new_mac_filter */
2298 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2300 goto err_no_memory_locked;
2302 /* Store pointer to the real filter */
2304 new->state = f->state;
2306 /* Add it to the hash list */
2307 hlist_add_head(&new->hlist, &tmp_add_list);
2310 /* Count the number of active (current and new) VLAN
2311 * filters we have now. Does not count filters which
2312 * are marked for deletion.
2318 retval = i40e_correct_mac_vlan_filters(vsi,
2323 goto err_no_memory_locked;
2325 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2328 /* Now process 'del_list' outside the lock */
2329 if (!hlist_empty(&tmp_del_list)) {
2330 filter_list_len = hw->aq.asq_buf_size /
2331 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2332 list_size = filter_list_len *
2333 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2334 del_list = kzalloc(list_size, GFP_ATOMIC);
2338 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2341 /* handle broadcast filters by updating the broadcast
2342 * promiscuous flag and releasing the filter from the list.
2344 if (is_broadcast_ether_addr(f->macaddr)) {
2345 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2347 hlist_del(&f->hlist);
2352 /* add to delete list */
2353 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2354 if (f->vlan == I40E_VLAN_ANY) {
2355 del_list[num_del].vlan_tag = 0;
2356 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2358 del_list[num_del].vlan_tag =
2359 cpu_to_le16((u16)(f->vlan));
2362 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2363 del_list[num_del].flags = cmd_flags;
2366 /* flush a full buffer */
2367 if (num_del == filter_list_len) {
2368 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2370 memset(del_list, 0, list_size);
2373 /* Release memory for MAC filter entries which were
2374 * synced up with HW.
2376 hlist_del(&f->hlist);
2381 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2389 if (!hlist_empty(&tmp_add_list)) {
2390 /* Do all the adds now. */
2391 filter_list_len = hw->aq.asq_buf_size /
2392 sizeof(struct i40e_aqc_add_macvlan_element_data);
2393 list_size = filter_list_len *
2394 sizeof(struct i40e_aqc_add_macvlan_element_data);
2395 add_list = kzalloc(list_size, GFP_ATOMIC);
2400 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2401 /* handle broadcast filters by updating the broadcast
2402 * promiscuous flag instead of adding a MAC filter.
2404 if (is_broadcast_ether_addr(new->f->macaddr)) {
2405 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2407 new->state = I40E_FILTER_FAILED;
2409 new->state = I40E_FILTER_ACTIVE;
2413 /* add to add array */
2417 ether_addr_copy(add_list[num_add].mac_addr,
2419 if (new->f->vlan == I40E_VLAN_ANY) {
2420 add_list[num_add].vlan_tag = 0;
2421 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2423 add_list[num_add].vlan_tag =
2424 cpu_to_le16((u16)(new->f->vlan));
2426 add_list[num_add].queue_number = 0;
2427 /* set invalid match method for later detection */
2428 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2429 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2430 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2433 /* flush a full buffer */
2434 if (num_add == filter_list_len) {
2435 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2437 memset(add_list, 0, list_size);
2442 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2445 /* Now move all of the filters from the temp add list back to
2448 spin_lock_bh(&vsi->mac_filter_hash_lock);
2449 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2450 /* Only update the state if we're still NEW */
2451 if (new->f->state == I40E_FILTER_NEW)
2452 new->f->state = new->state;
2453 hlist_del(&new->hlist);
2456 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2461 /* Determine the number of active and failed filters. */
2462 spin_lock_bh(&vsi->mac_filter_hash_lock);
2463 vsi->active_filters = 0;
2464 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2465 if (f->state == I40E_FILTER_ACTIVE)
2466 vsi->active_filters++;
2467 else if (f->state == I40E_FILTER_FAILED)
2470 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2472 /* Check if we are able to exit overflow promiscuous mode. We can
2473 * safely exit if we didn't just enter, we no longer have any failed
2474 * filters, and we have reduced filters below the threshold value.
2476 if (old_overflow && !failed_filters &&
2477 vsi->active_filters < vsi->promisc_threshold) {
2478 dev_info(&pf->pdev->dev,
2479 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2481 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2482 vsi->promisc_threshold = 0;
2485 /* if the VF is not trusted do not do promisc */
2486 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2487 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2491 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2493 /* If we are entering overflow promiscuous, we need to calculate a new
2494 * threshold for when we are safe to exit
2496 if (!old_overflow && new_overflow)
2497 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
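/* Example: entering overflow with 120 active filters sets the
 * threshold to 90, so the VSI stays in promiscuous mode until the
 * active count drops below 90 with no failed filters remaining.
 */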
2499 /* check for changes in promiscuous modes */
2500 if (changed_flags & IFF_ALLMULTI) {
2501 bool cur_multipromisc;
2503 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2504 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2509 retval = i40e_aq_rc_to_posix(aq_ret,
2510 hw->aq.asq_last_status);
2511 dev_info(&pf->pdev->dev,
2512 "set multi promisc failed on %s, err %s aq_err %s\n",
2514 i40e_stat_str(hw, aq_ret),
2515 i40e_aq_str(hw, hw->aq.asq_last_status));
2519 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2522 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2524 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2526 retval = i40e_aq_rc_to_posix(aq_ret,
2527 hw->aq.asq_last_status);
2528 dev_info(&pf->pdev->dev,
2529 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2530 cur_promisc ? "on" : "off",
2532 i40e_stat_str(hw, aq_ret),
2533 i40e_aq_str(hw, hw->aq.asq_last_status));
2537 /* if something went wrong then set the changed flag so we try again */
2539 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2541 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2545 /* Restore elements on the temporary add and delete lists */
2546 spin_lock_bh(&vsi->mac_filter_hash_lock);
2547 err_no_memory_locked:
2548 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2549 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2550 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2552 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2553 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2558 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2559 * @pf: board private structure
2561 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2567 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2570 for (v = 0; v < pf->num_alloc_vsi; v++) {
2572 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2573 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2576 /* come back and try again later */
2577 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2586 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2589 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
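/* XDP does not handle frames spanning multiple Rx buffers, so the
 * whole frame must fit in a single buffer.
 */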
2591 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2592 return I40E_RXBUFFER_2048;
2594 return I40E_RXBUFFER_3072;
2598 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2599 * @netdev: network interface device structure
2600 * @new_mtu: new value for maximum frame size
2602 * Returns 0 on success, negative on failure
2604 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2606 struct i40e_netdev_priv *np = netdev_priv(netdev);
2607 struct i40e_vsi *vsi = np->vsi;
2608 struct i40e_pf *pf = vsi->back;
2610 if (i40e_enabled_xdp_vsi(vsi)) {
2611 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2613 if (frame_size > i40e_max_xdp_frame_size(vsi))
2617 netdev_info(netdev, "changing MTU from %d to %d\n",
2618 netdev->mtu, new_mtu);
2619 netdev->mtu = new_mtu;
2620 if (netif_running(netdev))
2621 i40e_vsi_reinit_locked(vsi);
2622 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2623 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
2628 * i40e_ioctl - Access the hwtstamp interface
2629 * @netdev: network interface device structure
2630 * @ifr: interface request data
2631 * @cmd: ioctl command
2633 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2635 struct i40e_netdev_priv *np = netdev_priv(netdev);
2636 struct i40e_pf *pf = np->vsi->back;
2640 return i40e_ptp_get_ts_config(pf, ifr);
2642 return i40e_ptp_set_ts_config(pf, ifr);
2649 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2650 * @vsi: the vsi being adjusted
2652 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2654 struct i40e_vsi_context ctxt;
2657 if ((vsi->info.valid_sections &
2658 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2659 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2660 return; /* already enabled */
2662 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2663 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2664 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2666 ctxt.seid = vsi->seid;
2667 ctxt.info = vsi->info;
2668 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2670 dev_info(&vsi->back->pdev->dev,
2671 "update vlan stripping failed, err %s aq_err %s\n",
2672 i40e_stat_str(&vsi->back->hw, ret),
2673 i40e_aq_str(&vsi->back->hw,
2674 vsi->back->hw.aq.asq_last_status));
2679 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2680 * @vsi: the vsi being adjusted
2682 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2684 struct i40e_vsi_context ctxt;
2687 if ((vsi->info.valid_sections &
2688 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2689 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2690 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2691 return; /* already disabled */
2693 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2694 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2695 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2697 ctxt.seid = vsi->seid;
2698 ctxt.info = vsi->info;
2699 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2701 dev_info(&vsi->back->pdev->dev,
2702 "update vlan stripping failed, err %s aq_err %s\n",
2703 i40e_stat_str(&vsi->back->hw, ret),
2704 i40e_aq_str(&vsi->back->hw,
2705 vsi->back->hw.aq.asq_last_status));
2710 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2711 * @vsi: the vsi being configured
2712 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2714 * This is a helper function for adding a new MAC/VLAN filter with the
2715 * specified VLAN for each existing MAC address already in the hash table.
2716 * This function does *not* perform any accounting to update filters based on
2719 * NOTE: this function expects to be called while under the
2720 * mac_filter_hash_lock
2722 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2724 struct i40e_mac_filter *f, *add_f;
2725 struct hlist_node *h;
2728 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2729 if (f->state == I40E_FILTER_REMOVE)
2731 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2733 dev_info(&vsi->back->pdev->dev,
2734 "Could not add vlan filter %d for %pM\n",
2744 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2745 * @vsi: the VSI being configured
2746 * @vid: VLAN id to be added
2748 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2755 /* The network stack will attempt to add VID=0, with the intention to
2756 * receive priority tagged packets with a VLAN of 0. Our HW receives
2757 * these packets by default when configured to receive untagged
2758 * packets, so we don't need to add a filter for this case.
2759 * Additionally, HW interprets adding a VID=0 filter as meaning to
2760 * receive *only* tagged traffic and stops receiving untagged traffic.
2761 * Thus, we do not want to actually add a filter for VID=0
2766 /* Lock once because all functions invoked below iterate the list */
2767 spin_lock_bh(&vsi->mac_filter_hash_lock);
2768 err = i40e_add_vlan_all_mac(vsi, vid);
2769 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2773 /* schedule our worker thread which will take care of
2774 * applying the new filter changes
2776 i40e_service_event_schedule(vsi->back);
2781 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2782 * @vsi: the vsi being configured
2783 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2785 * This function should be used to remove all VLAN filters which match the
2786 * given VID. It does not schedule the service event and does not take the
2787 * mac_filter_hash_lock so it may be combined with other operations under
2788 * a single invocation of the mac_filter_hash_lock.
2790 * NOTE: this function expects to be called while under the
2791 * mac_filter_hash_lock
2793 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2795 struct i40e_mac_filter *f;
2796 struct hlist_node *h;
2799 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2801 __i40e_del_filter(vsi, f);
2806 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2807 * @vsi: the VSI being configured
2808 * @vid: VLAN id to be removed
2810 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2812 if (!vid || vsi->info.pvid)
2815 spin_lock_bh(&vsi->mac_filter_hash_lock);
2816 i40e_rm_vlan_all_mac(vsi, vid);
2817 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2819 /* schedule our worker thread which will take care of
2820 * applying the new filter changes
2822 i40e_service_event_schedule(vsi->back);
2826 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2827 * @netdev: network interface to be adjusted
2828 * @proto: unused protocol value
2829 * @vid: vlan id to be added
2831 * net_device_ops implementation for adding vlan ids
2833 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2834 __always_unused __be16 proto, u16 vid)
2836 struct i40e_netdev_priv *np = netdev_priv(netdev);
2837 struct i40e_vsi *vsi = np->vsi;
2840 if (vid >= VLAN_N_VID)
2843 ret = i40e_vsi_add_vlan(vsi, vid);
2845 set_bit(vid, vsi->active_vlans);
2851 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2852 * @netdev: network interface to be adjusted
2853 * @proto: unused protocol value
2854 * @vid: vlan id to be added
2856 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2857 __always_unused __be16 proto, u16 vid)
2859 struct i40e_netdev_priv *np = netdev_priv(netdev);
2860 struct i40e_vsi *vsi = np->vsi;
2862 if (vid >= VLAN_N_VID)
2864 set_bit(vid, vsi->active_vlans);
2868 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2869 * @netdev: network interface to be adjusted
2870 * @proto: unused protocol value
2871 * @vid: vlan id to be removed
2873 * net_device_ops implementation for removing vlan ids
2875 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2876 __always_unused __be16 proto, u16 vid)
2878 struct i40e_netdev_priv *np = netdev_priv(netdev);
2879 struct i40e_vsi *vsi = np->vsi;
2881 /* return code is ignored as there is nothing a user
2882 * can do about failure to remove and a log message was
2883 * already printed by the called function
2885 i40e_vsi_kill_vlan(vsi, vid);
2887 clear_bit(vid, vsi->active_vlans);
2893 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2894 * @vsi: the vsi being brought back up
2896 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2903 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2904 i40e_vlan_stripping_enable(vsi);
2906 i40e_vlan_stripping_disable(vsi);
2908 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2909 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2914 * i40e_vsi_add_pvid - Add pvid for the VSI
2915 * @vsi: the vsi being adjusted
2916 * @vid: the vlan id to set as a PVID
2918 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2920 struct i40e_vsi_context ctxt;
2923 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2924 vsi->info.pvid = cpu_to_le16(vid);
2925 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2926 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2927 I40E_AQ_VSI_PVLAN_EMOD_STR;
2929 ctxt.seid = vsi->seid;
2930 ctxt.info = vsi->info;
2931 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2933 dev_info(&vsi->back->pdev->dev,
2934 "add pvid failed, err %s aq_err %s\n",
2935 i40e_stat_str(&vsi->back->hw, ret),
2936 i40e_aq_str(&vsi->back->hw,
2937 vsi->back->hw.aq.asq_last_status));
2945 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2946 * @vsi: the vsi being adjusted
2948 * Simply disables VLAN stripping to put the VSI back in its normal state
2950 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2952 i40e_vlan_stripping_disable(vsi);
2958 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2959 * @vsi: ptr to the VSI
2961 * If this function returns with an error, then it's possible one or
2962 * more of the rings is populated (while the rest are not). It is the
2963 * caller's duty to clean those orphaned rings.
2965 * Return 0 on success, negative on failure
2967 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2971 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2972 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2974 if (!i40e_enabled_xdp_vsi(vsi))
2977 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2978 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2984 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2985 * @vsi: ptr to the VSI
2987 * Free VSI's transmit software resources
2989 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2993 if (vsi->tx_rings) {
2994 for (i = 0; i < vsi->num_queue_pairs; i++)
2995 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2996 i40e_free_tx_resources(vsi->tx_rings[i]);
2999 if (vsi->xdp_rings) {
3000 for (i = 0; i < vsi->num_queue_pairs; i++)
3001 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3002 i40e_free_tx_resources(vsi->xdp_rings[i]);
3007 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3008 * @vsi: ptr to the VSI
3010 * If this function returns with an error, then it's possible one or
3011 * more of the rings is populated (while the rest are not). It is the
3012 * caller's duty to clean those orphaned rings.
3014 * Return 0 on success, negative on failure
3016 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3020 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3021 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3026 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3027 * @vsi: ptr to the VSI
3029 * Free all receive software resources
3031 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3038 for (i = 0; i < vsi->num_queue_pairs; i++)
3039 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3040 i40e_free_rx_resources(vsi->rx_rings[i]);
3044 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3045 * @ring: The Tx ring to configure
3047 * This enables/disables XPS for a given Tx descriptor ring
3048 * based on the TCs enabled for the VSI that ring belongs to.
3050 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3054 if (!ring->q_vector || !ring->netdev || ring->ch)
3057 /* We only initialize XPS once, so as not to overwrite user settings */
3058 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3061 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3062 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3067 * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and zero-copy are enabled
3068 * @ring: The Tx or Rx ring
3070 * Returns the UMEM or NULL.
3072 static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3074 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3075 int qid = ring->queue_index;
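/* XDP Tx rings are allocated after the regular rings, so map the
 * ring index back to the user-visible queue id.
 */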
3077 if (ring_is_xdp(ring))
3078 qid -= ring->vsi->alloc_queue_pairs;
3083 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3087 * i40e_configure_tx_ring - Configure a transmit ring context
3088 * @ring: The Tx ring to configure
3090 * Configure the Tx descriptor ring in the HMC context.
3092 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3094 struct i40e_vsi *vsi = ring->vsi;
3095 u16 pf_q = vsi->base_queue + ring->queue_index;
3096 struct i40e_hw *hw = &vsi->back->hw;
3097 struct i40e_hmc_obj_txq tx_ctx;
3098 i40e_status err = 0;
3101 if (ring_is_xdp(ring))
3102 ring->xsk_umem = i40e_xsk_umem(ring);
3104 /* some ATR related tx ring init */
3105 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3106 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3107 ring->atr_count = 0;
3109 ring->atr_sample_rate = 0;
3113 i40e_config_xps_tx_ring(ring);
3115 /* clear the context structure first */
3116 memset(&tx_ctx, 0, sizeof(tx_ctx));
3118 tx_ctx.new_context = 1;
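/* the HMC expects the ring base address in 128-byte units */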
3119 tx_ctx.base = (ring->dma / 128);
3120 tx_ctx.qlen = ring->count;
3121 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3122 I40E_FLAG_FD_ATR_ENABLED));
3123 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3124 /* FDIR VSI tx ring can still use RS bit and writebacks */
3125 if (vsi->type != I40E_VSI_FDIR)
3126 tx_ctx.head_wb_ena = 1;
3127 tx_ctx.head_wb_addr = ring->dma +
3128 (ring->count * sizeof(struct i40e_tx_desc));
3130 /* As part of VSI creation/update, FW allocates certain
3131 * Tx arbitration queue sets for each TC enabled for
3132 * the VSI. The FW returns the handles to these queue
3133 * sets as part of the response buffer to Add VSI,
3134 * Update VSI, etc. AQ commands. It is expected that
3135 * these queue set handles be associated with the Tx
3136 * queues by the driver as part of the TX queue context
3137 * initialization. This has to be done regardless of
3138 * DCB as by default everything is mapped to TC0.
3143 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3146 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3148 tx_ctx.rdylist_act = 0;
3150 /* clear the context in the HMC */
3151 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3153 dev_info(&vsi->back->pdev->dev,
3154 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3155 ring->queue_index, pf_q, err);
3159 /* set the context in the HMC */
3160 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3162 dev_info(&vsi->back->pdev->dev,
3163 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3164 ring->queue_index, pf_q, err);
3168 /* Now associate this queue with this PCI function */
3170 if (ring->ch->type == I40E_VSI_VMDQ2)
3171 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3175 qtx_ctl |= (ring->ch->vsi_number <<
3176 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3177 I40E_QTX_CTL_VFVM_INDX_MASK;
3179 if (vsi->type == I40E_VSI_VMDQ2) {
3180 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3181 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3182 I40E_QTX_CTL_VFVM_INDX_MASK;
3184 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3188 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3189 I40E_QTX_CTL_PF_INDX_MASK);
3190 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3193 /* cache the tail offset for easier writes later */
3194 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3200 * i40e_configure_rx_ring - Configure a receive ring context
3201 * @ring: The Rx ring to configure
3203 * Configure the Rx descriptor ring in the HMC context.
3205 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3207 struct i40e_vsi *vsi = ring->vsi;
3208 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3209 u16 pf_q = vsi->base_queue + ring->queue_index;
3210 struct i40e_hw *hw = &vsi->back->hw;
3211 struct i40e_hmc_obj_rxq rx_ctx;
3212 i40e_status err = 0;
3216 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3218 /* clear the context structure first */
3219 memset(&rx_ctx, 0, sizeof(rx_ctx));
3221 if (ring->vsi->type == I40E_VSI_MAIN)
3222 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
3224 ring->xsk_umem = i40e_xsk_umem(ring);
3225 if (ring->xsk_umem) {
3226 ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
3227 XDP_PACKET_HEADROOM;
3228 /* For AF_XDP ZC, we disallow packets spanning
3229 * multiple buffers, thus letting us skip that
3230 * handling in the fast-path.
3233 ring->zca.free = i40e_zca_free;
3234 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3239 dev_info(&vsi->back->pdev->dev,
3240 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
3244 ring->rx_buf_len = vsi->rx_buf_len;
3245 if (ring->vsi->type == I40E_VSI_MAIN) {
3246 ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
3247 MEM_TYPE_PAGE_SHARED,
3254 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3255 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
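/* as with Tx, the ring base address is programmed in 128-byte units */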
3257 rx_ctx.base = (ring->dma / 128);
3258 rx_ctx.qlen = ring->count;
3260 /* use 32 byte descriptors */
3263 /* descriptor type is always zero
3266 rx_ctx.hsplit_0 = 0;
3268 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3269 if (hw->revision_id == 0)
3270 rx_ctx.lrxqthresh = 0;
3272 rx_ctx.lrxqthresh = 1;
3273 rx_ctx.crcstrip = 1;
3275 /* this controls whether VLAN is stripped from inner headers */
3277 /* set the prefena field to 1 because the manual says to */
3280 /* clear the context in the HMC */
3281 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3283 dev_info(&vsi->back->pdev->dev,
3284 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3285 ring->queue_index, pf_q, err);
3289 /* set the context in the HMC */
3290 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3292 dev_info(&vsi->back->pdev->dev,
3293 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3294 ring->queue_index, pf_q, err);
3298 /* configure Rx buffer alignment */
3299 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3300 clear_ring_build_skb_enabled(ring);
3302 set_ring_build_skb_enabled(ring);
3304 /* cache tail for quicker writes, and clear the reg before use */
3305 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3306 writel(0, ring->tail);
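/* Note the asymmetric return conventions: the zero-copy allocator
 * returns true on success while the regular allocator returns true
 * on failure, hence the negation below.
 */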
3308 ok = ring->xsk_umem ?
3309 i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
3310 !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3312 /* Log this in case the user has forgotten to give the kernel
3313 * any buffers, even later in the application.
3315 dev_info(&vsi->back->pdev->dev,
3316 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
3317 ring->xsk_umem ? "UMEM enabled " : "",
3318 ring->queue_index, pf_q);
3325 * i40e_vsi_configure_tx - Configure the VSI for Tx
3326 * @vsi: VSI structure describing this set of rings and resources
3328 * Configure the Tx VSI for operation.
3330 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3335 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3336 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3338 if (!i40e_enabled_xdp_vsi(vsi))
3341 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3342 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3348 * i40e_vsi_configure_rx - Configure the VSI for Rx
3349 * @vsi: the VSI being configured
3351 * Configure the Rx VSI for operation.
3353 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3358 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3359 vsi->max_frame = I40E_MAX_RXBUFFER;
3360 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3361 #if (PAGE_SIZE < 8192)
3362 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3363 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3364 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3365 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3368 vsi->max_frame = I40E_MAX_RXBUFFER;
3369 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3373 /* set up individual rings */
3374 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3375 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3381 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3382 * @vsi: ptr to the VSI
3384 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3386 struct i40e_ring *tx_ring, *rx_ring;
3387 u16 qoffset, qcount;
3390 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3391 /* Reset the TC information */
3392 for (i = 0; i < vsi->num_queue_pairs; i++) {
3393 rx_ring = vsi->rx_rings[i];
3394 tx_ring = vsi->tx_rings[i];
3395 rx_ring->dcb_tc = 0;
3396 tx_ring->dcb_tc = 0;
3401 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3402 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3405 qoffset = vsi->tc_config.tc_info[n].qoffset;
3406 qcount = vsi->tc_config.tc_info[n].qcount;
3407 for (i = qoffset; i < (qoffset + qcount); i++) {
3408 rx_ring = vsi->rx_rings[i];
3409 tx_ring = vsi->tx_rings[i];
3410 rx_ring->dcb_tc = n;
3411 tx_ring->dcb_tc = n;
3417 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3418 * @vsi: ptr to the VSI
3420 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3423 i40e_set_rx_mode(vsi->netdev);
3427 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3428 * @vsi: Pointer to the targeted VSI
3430 * This function replays to the hardware the hlist in which all the
3431 * SB Flow Director filters were saved.
3433 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3435 struct i40e_fdir_filter *filter;
3436 struct i40e_pf *pf = vsi->back;
3437 struct hlist_node *node;
3439 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3442 /* Reset FDir counters as we're replaying all existing filters */
3443 pf->fd_tcp4_filter_cnt = 0;
3444 pf->fd_udp4_filter_cnt = 0;
3445 pf->fd_sctp4_filter_cnt = 0;
3446 pf->fd_ip4_filter_cnt = 0;
3448 hlist_for_each_entry_safe(filter, node,
3449 &pf->fdir_filter_list, fdir_node) {
3450 i40e_add_del_fdir(vsi, filter, true);
3455 * i40e_vsi_configure - Set up the VSI for action
3456 * @vsi: the VSI being configured
3458 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3462 i40e_set_vsi_rx_mode(vsi);
3463 i40e_restore_vlan(vsi);
3464 i40e_vsi_config_dcb_rings(vsi);
3465 err = i40e_vsi_configure_tx(vsi);
3467 err = i40e_vsi_configure_rx(vsi);
3473 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3474 * @vsi: the VSI being configured
3476 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3478 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3479 struct i40e_pf *pf = vsi->back;
3480 struct i40e_hw *hw = &pf->hw;
3485 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3486 * and PFINT_LNKLSTn registers, e.g.:
3487 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3489 qp = vsi->base_queue;
3490 vector = vsi->base_vector;
3491 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3492 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3494 q_vector->rx.next_update = jiffies + 1;
3495 q_vector->rx.target_itr =
3496 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3497 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3498 q_vector->rx.target_itr);
3499 q_vector->rx.current_itr = q_vector->rx.target_itr;
3501 q_vector->tx.next_update = jiffies + 1;
3502 q_vector->tx.target_itr =
3503 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3504 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3505 q_vector->tx.target_itr);
3506 q_vector->tx.current_itr = q_vector->tx.target_itr;
3508 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3509 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3511 /* Linked list for the queuepairs assigned to this vector */
3512 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3513 for (q = 0; q < q_vector->num_ringpairs; q++) {
3514 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
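/* The per-vector linked list is chained Rx -> XDP Tx -> Tx -> next Rx
 * when XDP is enabled, and Rx -> Tx -> next Rx otherwise.
 */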
3517 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3518 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3519 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3520 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3521 (I40E_QUEUE_TYPE_TX <<
3522 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3524 wr32(hw, I40E_QINT_RQCTL(qp), val);
3527 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3528 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3529 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3530 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3531 (I40E_QUEUE_TYPE_TX <<
3532 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3534 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3537 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3538 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3539 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3540 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3541 (I40E_QUEUE_TYPE_RX <<
3542 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3544 /* Terminate the linked list */
3545 if (q == (q_vector->num_ringpairs - 1))
3546 val |= (I40E_QUEUE_END_OF_LIST <<
3547 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3549 wr32(hw, I40E_QINT_TQCTL(qp), val);
3558 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3559 * @pf: pointer to private device data structure
3561 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3563 struct i40e_hw *hw = &pf->hw;
3566 /* clear things first */
3567 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3568 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3570 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3571 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3572 I40E_PFINT_ICR0_ENA_GRST_MASK |
3573 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3574 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3575 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3576 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3577 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3579 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3580 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3582 if (pf->flags & I40E_FLAG_PTP)
3583 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3585 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3587 /* SW_ITR_IDX = 0, but don't change INTENA */
3588 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3589 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3591 /* OTHER_ITR_IDX = 0 */
3592 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3596 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3597 * @vsi: the VSI being configured
3599 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3601 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3602 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3603 struct i40e_pf *pf = vsi->back;
3604 struct i40e_hw *hw = &pf->hw;
3607 /* set the ITR configuration */
3608 q_vector->rx.next_update = jiffies + 1;
3609 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3610 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
3611 q_vector->rx.current_itr = q_vector->rx.target_itr;
3612 q_vector->tx.next_update = jiffies + 1;
3613 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3614 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
3615 q_vector->tx.current_itr = q_vector->tx.target_itr;
3617 i40e_enable_misc_int_causes(pf);
3619 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3620 wr32(hw, I40E_PFINT_LNKLST0, 0);
3622 /* Associate the queue pair to the vector and enable the queue int */
3623 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3624 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3625 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3626 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3628 wr32(hw, I40E_QINT_RQCTL(0), val);
3630 if (i40e_enabled_xdp_vsi(vsi)) {
3631 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3632 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3634 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3636 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3639 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3640 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3641 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3643 wr32(hw, I40E_QINT_TQCTL(0), val);
3648 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3649 * @pf: board private structure
3651 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3653 struct i40e_hw *hw = &pf->hw;
3655 wr32(hw, I40E_PFINT_DYN_CTL0,
3656 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3661 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3662 * @pf: board private structure
3664 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3666 struct i40e_hw *hw = &pf->hw;
3669 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3670 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3671 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3673 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3678 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3679 * @irq: interrupt number
3680 * @data: pointer to a q_vector
3682 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3684 struct i40e_q_vector *q_vector = data;
3686 if (!q_vector->tx.ring && !q_vector->rx.ring)
3689 napi_schedule_irqoff(&q_vector->napi);
3695 * i40e_irq_affinity_notify - Callback for affinity changes
3696 * @notify: context as to what irq was changed
3697 * @mask: the new affinity mask
3699 * This is a callback function used by the irq_set_affinity_notifier function
3700 * so that we may register to receive changes to the irq affinity masks.
3702 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3703 const cpumask_t *mask)
3705 struct i40e_q_vector *q_vector =
3706 container_of(notify, struct i40e_q_vector, affinity_notify);
3708 cpumask_copy(&q_vector->affinity_mask, mask);
3712 * i40e_irq_affinity_release - Callback for affinity notifier release
3713 * @ref: internal core kernel usage
3715 * This is a callback function used by the irq_set_affinity_notifier function
3716 * to inform the current notification subscriber that they will no longer
3717 * receive notifications.
3719 static void i40e_irq_affinity_release(struct kref *ref) {}
3722 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3723 * @vsi: the VSI being configured
3724 * @basename: name for the vector
3726 * Allocates MSI-X vectors and requests interrupts from the kernel.
3728 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3730 int q_vectors = vsi->num_q_vectors;
3731 struct i40e_pf *pf = vsi->back;
3732 int base = vsi->base_vector;
3739 for (vector = 0; vector < q_vectors; vector++) {
3740 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3742 irq_num = pf->msix_entries[base + vector].vector;
3744 if (q_vector->tx.ring && q_vector->rx.ring) {
3745 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3746 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3748 } else if (q_vector->rx.ring) {
3749 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3750 "%s-%s-%d", basename, "rx", rx_int_idx++);
3751 } else if (q_vector->tx.ring) {
3752 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3753 "%s-%s-%d", basename, "tx", tx_int_idx++);
3755 /* skip this unused q_vector */
3758 err = request_irq(irq_num,
3764 dev_info(&pf->pdev->dev,
3765 "MSIX request_irq failed, error: %d\n", err);
3766 goto free_queue_irqs;
3769 /* register for affinity change notifications */
3770 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3771 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3772 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3773 /* Spread affinity hints out across online CPUs.
3775 * get_cpu_mask returns a static constant mask with
3776 * a permanent lifetime so it's ok to pass to
3777 * irq_set_affinity_hint without making a copy.
3779 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3780 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3783 vsi->irqs_ready = true;
3789 irq_num = pf->msix_entries[base + vector].vector;
3790 irq_set_affinity_notifier(irq_num, NULL);
3791 irq_set_affinity_hint(irq_num, NULL);
3792 free_irq(irq_num, &vsi->q_vectors[vector]);
3798 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3799 * @vsi: the VSI being un-configured
3801 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3803 struct i40e_pf *pf = vsi->back;
3804 struct i40e_hw *hw = &pf->hw;
3805 int base = vsi->base_vector;
3808 /* disable interrupt causation from each queue */
3809 for (i = 0; i < vsi->num_queue_pairs; i++) {
3812 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3813 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3814 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3816 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3817 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3818 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3820 if (!i40e_enabled_xdp_vsi(vsi))
3822 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3825 /* disable each interrupt */
3826 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3827 for (i = vsi->base_vector;
3828 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3829 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3832 for (i = 0; i < vsi->num_q_vectors; i++)
3833 synchronize_irq(pf->msix_entries[i + base].vector);
3835 /* Legacy and MSI mode - this stops all interrupt handling */
3836 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3837 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3839 synchronize_irq(pf->pdev->irq);
3844 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3845 * @vsi: the VSI being configured
3847 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3849 struct i40e_pf *pf = vsi->back;
3852 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3853 for (i = 0; i < vsi->num_q_vectors; i++)
3854 i40e_irq_dynamic_enable(vsi, i);
3856 i40e_irq_dynamic_enable_icr0(pf);
3859 i40e_flush(&pf->hw);
3864 * i40e_free_misc_vector - Free the vector that handles non-queue events
3865 * @pf: board private structure
3867 static void i40e_free_misc_vector(struct i40e_pf *pf)
3870 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3871 i40e_flush(&pf->hw);
3873 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3874 synchronize_irq(pf->msix_entries[0].vector);
3875 free_irq(pf->msix_entries[0].vector, pf);
3876 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3881 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3882 * @irq: interrupt number
3883 * @data: pointer to a q_vector
3885 * This is the handler used for all MSI/Legacy interrupts, and deals
3886 * with both queue and non-queue interrupts. This is also used in
3887 * MSIX mode to handle the non-queue interrupts.
3889 static irqreturn_t i40e_intr(int irq, void *data)
3891 struct i40e_pf *pf = (struct i40e_pf *)data;
3892 struct i40e_hw *hw = &pf->hw;
3893 irqreturn_t ret = IRQ_NONE;
3894 u32 icr0, icr0_remaining;
3897 icr0 = rd32(hw, I40E_PFINT_ICR0);
3898 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3900 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3901 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3904 /* if interrupt but no bits showing, must be SWINT */
3905 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3906 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3909 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3910 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3911 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3912 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3913 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3916 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3917 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3918 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3919 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3921 /* We do not have a way to disarm queue causes while leaving
3922 * the interrupt enabled for all other causes; ideally the
3923 * interrupt should be disabled while we are in NAPI, but
3924 * this is not a performance path and napi_schedule()
3925 * can deal with rescheduling.
3927 if (!test_bit(__I40E_DOWN, pf->state))
3928 napi_schedule_irqoff(&q_vector->napi);
3931 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3932 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3933 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3934 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3937 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3938 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3939 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3942 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3943 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3944 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3947 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3948 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3949 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3950 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3951 val = rd32(hw, I40E_GLGEN_RSTAT);
3952 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3953 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3954 if (val == I40E_RESET_CORER) {
3956 } else if (val == I40E_RESET_GLOBR) {
3958 } else if (val == I40E_RESET_EMPR) {
3960 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3964 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3965 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3966 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3967 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3968 rd32(hw, I40E_PFHMC_ERRORINFO),
3969 rd32(hw, I40E_PFHMC_ERRORDATA));
3972 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3973 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3975 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3976 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3977 i40e_ptp_tx_hwtstamp(pf);
3981 /* If a critical error is pending we have no choice but to reset the
3982 * device.
3983 * Report and mask out any remaining unexpected interrupts.
3985 icr0_remaining = icr0 & ena_mask;
3986 if (icr0_remaining) {
3987 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3989 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3990 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3991 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3992 dev_info(&pf->pdev->dev, "device will be reset\n");
3993 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3994 i40e_service_event_schedule(pf);
3996 ena_mask &= ~icr0_remaining;
4001 /* re-enable interrupt causes */
4002 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
4003 if (!test_bit(__I40E_DOWN, pf->state)) {
4004 i40e_service_event_schedule(pf);
4005 i40e_irq_dynamic_enable_icr0(pf);
4012 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4013 * @tx_ring: tx ring to clean
4014 * @budget: how many cleans we're allowed
4016 * Returns true if there's any budget left (i.e. the clean is finished)
4018 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
4020 struct i40e_vsi *vsi = tx_ring->vsi;
4021 u16 i = tx_ring->next_to_clean;
4022 struct i40e_tx_buffer *tx_buf;
4023 struct i40e_tx_desc *tx_desc;
4025 tx_buf = &tx_ring->tx_bi[i];
4026 tx_desc = I40E_TX_DESC(tx_ring, i);
4027 i -= tx_ring->count;
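/* The index is kept one ring-length below its true value so that
 * wrap-around can be detected by the counter reaching zero, at
 * which point it is rebased to the start of the ring.
 */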
4030 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
4032 /* if next_to_watch is not set then there is no work pending */
4036 /* prevent any other reads prior to eop_desc */
4039 /* if the descriptor isn't done, no work yet to do */
4040 if (!(eop_desc->cmd_type_offset_bsz &
4041 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4044 /* clear next_to_watch to prevent false hangs */
4045 tx_buf->next_to_watch = NULL;
4047 tx_desc->buffer_addr = 0;
4048 tx_desc->cmd_type_offset_bsz = 0;
4049 /* move past filter desc */
4054 i -= tx_ring->count;
4055 tx_buf = tx_ring->tx_bi;
4056 tx_desc = I40E_TX_DESC(tx_ring, 0);
4058 /* unmap skb header data */
4059 dma_unmap_single(tx_ring->dev,
4060 dma_unmap_addr(tx_buf, dma),
4061 dma_unmap_len(tx_buf, len),
4063 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4064 kfree(tx_buf->raw_buf);
4066 tx_buf->raw_buf = NULL;
4067 tx_buf->tx_flags = 0;
4068 tx_buf->next_to_watch = NULL;
4069 dma_unmap_len_set(tx_buf, len, 0);
4070 tx_desc->buffer_addr = 0;
4071 tx_desc->cmd_type_offset_bsz = 0;
4073 /* move us past the eop_desc for start of next FD desc */
4078 i -= tx_ring->count;
4079 tx_buf = tx_ring->tx_bi;
4080 tx_desc = I40E_TX_DESC(tx_ring, 0);
4083 /* update budget accounting */
4085 } while (likely(budget));
4087 i += tx_ring->count;
4088 tx_ring->next_to_clean = i;
4090 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4091 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4097 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4098 * @irq: interrupt number
4099 * @data: pointer to a q_vector
4101 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4103 struct i40e_q_vector *q_vector = data;
4104 struct i40e_vsi *vsi;
4106 if (!q_vector->tx.ring)
4109 vsi = q_vector->tx.ring->vsi;
4110 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4116 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4117 * @vsi: the VSI being configured
4118 * @v_idx: vector index
4119 * @qp_idx: queue pair index
4121 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4123 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4124 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4125 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4127 tx_ring->q_vector = q_vector;
4128 tx_ring->next = q_vector->tx.ring;
4129 q_vector->tx.ring = tx_ring;
4130 q_vector->tx.count++;
4132 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4133 if (i40e_enabled_xdp_vsi(vsi)) {
4134 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4136 xdp_ring->q_vector = q_vector;
4137 xdp_ring->next = q_vector->tx.ring;
4138 q_vector->tx.ring = xdp_ring;
4139 q_vector->tx.count++;
4142 rx_ring->q_vector = q_vector;
4143 rx_ring->next = q_vector->rx.ring;
4144 q_vector->rx.ring = rx_ring;
4145 q_vector->rx.count++;
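/* Rings attached to a vector form singly-linked lists threaded through
 * ring->next, newest ring at the head, which i40e_for_each_ring() walks at
 * interrupt time. E.g. mapping queue pairs 0 and 1 onto one vector leaves
 * q_vector->tx.ring -> Tx1 -> Tx0, with any XDP rings spliced in the same way.
 */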
4149 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4150 * @vsi: the VSI being configured
4152 * This function maps descriptor rings to the queue-specific vectors
4153 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4154 * one vector per queue pair, but on a constrained vector budget, we
4155 * group the queue pairs as "efficiently" as possible.
4157 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4159 int qp_remaining = vsi->num_queue_pairs;
4160 int q_vectors = vsi->num_q_vectors;
/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
 * group them so there are multiple queues per vector.
 * It is also important to go through all the vectors available to be
 * sure that if we don't use all of them, the remaining vectors are
 * cleared. This is especially important when decreasing the
 * number of queues in use.
 */
4172 for (; v_start < q_vectors; v_start++) {
4173 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4175 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
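/* e.g. 10 remaining queue pairs spread over 4 vectors come out as
 * 3, 3, 2, 2, since qp_remaining shrinks after each vector is filled
 */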
4177 q_vector->num_ringpairs = num_ringpairs;
4178 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4180 q_vector->rx.count = 0;
4181 q_vector->tx.count = 0;
4182 q_vector->rx.ring = NULL;
4183 q_vector->tx.ring = NULL;
4185 while (num_ringpairs--) {
4186 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4194 * i40e_vsi_request_irq - Request IRQ from the OS
4195 * @vsi: the VSI being configured
4196 * @basename: name for the vector
4198 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4200 struct i40e_pf *pf = vsi->back;
4203 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4204 err = i40e_vsi_request_irq_msix(vsi, basename);
4205 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4206 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4209 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4213 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4218 #ifdef CONFIG_NET_POLL_CONTROLLER
4220 * i40e_netpoll - A Polling 'interrupt' handler
4221 * @netdev: network interface device structure
4223 * This is used by netconsole to send skbs without having to re-enable
4224 * interrupts. It's not called while the normal interrupt routine is executing.
4226 static void i40e_netpoll(struct net_device *netdev)
4228 struct i40e_netdev_priv *np = netdev_priv(netdev);
4229 struct i40e_vsi *vsi = np->vsi;
4230 struct i40e_pf *pf = vsi->back;
4233 /* if interface is down do nothing */
4234 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4237 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4238 for (i = 0; i < vsi->num_q_vectors; i++)
4239 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4241 i40e_intr(pf->pdev->irq, netdev);
4246 #define I40E_QTX_ENA_WAIT_COUNT 50
4249 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4250 * @pf: the PF being configured
4251 * @pf_q: the PF queue
4252 * @enable: enable or disable state of the queue
4254 * This routine will wait for the given Tx queue of the PF to reach the
4255 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
 * multiple retries; otherwise returns 0 on success.
4259 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4264 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4265 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4266 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4269 usleep_range(10, 20);
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
        return -ETIMEDOUT;

return 0;
4278 * i40e_control_tx_q - Start or stop a particular Tx queue
4279 * @pf: the PF structure
4280 * @pf_q: the PF queue to configure
4281 * @enable: start or stop the queue
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
4287 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4289 struct i40e_hw *hw = &pf->hw;
4293 /* warn the TX unit of coming changes */
4294 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4296 usleep_range(10, 20);
4298 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4299 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4300 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4301 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4303 usleep_range(1000, 2000);
4306 /* Skip if the queue is already in the requested state */
4307 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4310 /* turn on/off the queue */
4312 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4313 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4315 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4318 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4322 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
4325 * @pf_q: the PF queue to configure
4326 * @is_xdp: true if the queue is used for XDP
4327 * @enable: start or stop the queue
4329 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4330 bool is_xdp, bool enable)
4334 i40e_control_tx_q(pf, pf_q, enable);
4336 /* wait for the change to finish */
4337 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4339 dev_info(&pf->pdev->dev,
4340 "VSI seid %d %sTx ring %d %sable timeout\n",
4341 seid, (is_xdp ? "XDP " : ""), pf_q,
4342 (enable ? "en" : "dis"));
4349 * i40e_vsi_control_tx - Start or stop a VSI's rings
4350 * @vsi: the VSI being configured
4351 * @enable: start or stop the rings
4353 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4355 struct i40e_pf *pf = vsi->back;
4356 int i, pf_q, ret = 0;
4358 pf_q = vsi->base_queue;
4359 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4360 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4362 false /*is xdp*/, enable);
4366 if (!i40e_enabled_xdp_vsi(vsi))
4369 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4370 pf_q + vsi->alloc_queue_pairs,
4371 true /*is xdp*/, enable);
4379 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4380 * @pf: the PF being configured
4381 * @pf_q: the PF queue
4382 * @enable: enable or disable state of the queue
4384 * This routine will wait for the given Rx queue of the PF to reach the
4385 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
 * multiple retries; otherwise returns 0 on success.
4389 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4394 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4395 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4396 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4399 usleep_range(10, 20);
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
        return -ETIMEDOUT;

return 0;
4408 * i40e_control_rx_q - Start or stop a particular Rx queue
4409 * @pf: the PF structure
4410 * @pf_q: the PF queue to configure
4411 * @enable: start or stop the queue
4413 * This function enables or disables a single queue. Note that
4414 * any delay required after the operation is expected to be
4415 * handled by the caller of this function.
4417 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4419 struct i40e_hw *hw = &pf->hw;
4423 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4424 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4425 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4426 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4428 usleep_range(1000, 2000);
4431 /* Skip if the queue is already in the requested state */
4432 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4435 /* turn on/off the queue */
4437 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4439 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4441 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4446 * @pf: the PF structure
4447 * @pf_q: queue being configured
4448 * @enable: start or stop the rings
4450 * This function enables or disables a single queue along with waiting
4451 * for the change to finish. The caller of this function should handle
4452 * the delays needed in the case of disabling queues.
4454 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4458 i40e_control_rx_q(pf, pf_q, enable);
4460 /* wait for the change to finish */
4461 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4469 * i40e_vsi_control_rx - Start or stop a VSI's rings
4470 * @vsi: the VSI being configured
4471 * @enable: start or stop the rings
4473 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4475 struct i40e_pf *pf = vsi->back;
4476 int i, pf_q, ret = 0;
4478 pf_q = vsi->base_queue;
4479 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4480 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4482 dev_info(&pf->pdev->dev,
4483 "VSI seid %d Rx ring %d %sable timeout\n",
4484 vsi->seid, pf_q, (enable ? "en" : "dis"));
/* Due to HW errata, on Rx disable only, the register can indicate done
 * before it really is. Needs 50ms to be sure
 */
if (!enable)
        mdelay(50);
4499 * i40e_vsi_start_rings - Start a VSI's rings
4500 * @vsi: the VSI being configured
4502 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4506 /* do rx first for enable and last for disable */
4507 ret = i40e_vsi_control_rx(vsi, true);
4510 ret = i40e_vsi_control_tx(vsi, true);
4516 * i40e_vsi_stop_rings - Stop a VSI's rings
4517 * @vsi: the VSI being configured
4519 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4521 /* When port TX is suspended, don't wait */
4522 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4523 return i40e_vsi_stop_rings_no_wait(vsi);
4525 /* do rx first for enable and last for disable
4526 * Ignore return value, we need to shutdown whatever we can
4528 i40e_vsi_control_tx(vsi, false);
4529 i40e_vsi_control_rx(vsi, false);
4533 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4534 * @vsi: the VSI being shutdown
4536 * This function stops all the rings for a VSI but does not delay to verify
4537 * that rings have been disabled. It is expected that the caller is shutting
4538 * down multiple VSIs at once and will delay together for all the VSIs after
4539 * initiating the shutdown. This is particularly useful for shutting down lots
4540 * of VFs together. Otherwise, a large delay can be incurred while configuring
4541 * each VSI in serial.
4543 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4545 struct i40e_pf *pf = vsi->back;
4548 pf_q = vsi->base_queue;
4549 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4550 i40e_control_tx_q(pf, pf_q, false);
4551 i40e_control_rx_q(pf, pf_q, false);
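/* Deliberately no waiting here: per this function's kernel-doc, the caller
 * is expected to delay once for all VSIs after initiating the shutdown,
 * which also covers the Rx-disable errata noted in i40e_vsi_control_rx().
 */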
4556 * i40e_vsi_free_irq - Free the irq association with the OS
4557 * @vsi: the VSI being configured
4559 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4561 struct i40e_pf *pf = vsi->back;
4562 struct i40e_hw *hw = &pf->hw;
4563 int base = vsi->base_vector;
4567 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4568 if (!vsi->q_vectors)
4571 if (!vsi->irqs_ready)
4574 vsi->irqs_ready = false;
4575 for (i = 0; i < vsi->num_q_vectors; i++) {
4580 irq_num = pf->msix_entries[vector].vector;
4582 /* free only the irqs that were actually requested */
4583 if (!vsi->q_vectors[i] ||
4584 !vsi->q_vectors[i]->num_ringpairs)
4587 /* clear the affinity notifier in the IRQ descriptor */
4588 irq_set_affinity_notifier(irq_num, NULL);
4589 /* remove our suggested affinity mask for this IRQ */
4590 irq_set_affinity_hint(irq_num, NULL);
4591 synchronize_irq(irq_num);
4592 free_irq(irq_num, vsi->q_vectors[i]);
4594 /* Tear down the interrupt queue link list
4596 * We know that they come in pairs and always
4597 * the Rx first, then the Tx. To clear the
4598 * link list, stick the EOL value into the
4599 * next_q field of the registers.
4601 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4602 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4603 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4604 val |= I40E_QUEUE_END_OF_LIST
4605 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4606 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4608 while (qp != I40E_QUEUE_END_OF_LIST) {
4611 val = rd32(hw, I40E_QINT_RQCTL(qp));
4613 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4614 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4615 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4616 I40E_QINT_RQCTL_INTEVENT_MASK);
4618 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4619 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4621 wr32(hw, I40E_QINT_RQCTL(qp), val);
4623 val = rd32(hw, I40E_QINT_TQCTL(qp));
4625 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4626 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4628 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4629 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4630 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4631 I40E_QINT_TQCTL_INTEVENT_MASK);
4633 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4634 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4636 wr32(hw, I40E_QINT_TQCTL(qp), val);
4641 free_irq(pf->pdev->irq, pf);
4643 val = rd32(hw, I40E_PFINT_LNKLST0);
4644 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4645 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4646 val |= I40E_QUEUE_END_OF_LIST
4647 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4648 wr32(hw, I40E_PFINT_LNKLST0, val);
4650 val = rd32(hw, I40E_QINT_RQCTL(qp));
4651 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4652 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4653 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4654 I40E_QINT_RQCTL_INTEVENT_MASK);
4656 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4657 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4659 wr32(hw, I40E_QINT_RQCTL(qp), val);
4661 val = rd32(hw, I40E_QINT_TQCTL(qp));
4663 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4664 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4665 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4666 I40E_QINT_TQCTL_INTEVENT_MASK);
4668 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4669 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4671 wr32(hw, I40E_QINT_TQCTL(qp), val);
4676 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4677 * @vsi: the VSI being configured
4678 * @v_idx: Index of vector to be freed
4680 * This function frees the memory allocated to the q_vector. In addition if
4681 * NAPI is enabled it will delete any references to the NAPI struct prior
4682 * to freeing the q_vector.
4684 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4686 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4687 struct i40e_ring *ring;
4692 /* disassociate q_vector from rings */
4693 i40e_for_each_ring(ring, q_vector->tx)
4694 ring->q_vector = NULL;
4696 i40e_for_each_ring(ring, q_vector->rx)
4697 ring->q_vector = NULL;
4699 /* only VSI w/ an associated netdev is set up w/ NAPI */
4701 netif_napi_del(&q_vector->napi);
4703 vsi->q_vectors[v_idx] = NULL;
4705 kfree_rcu(q_vector, rcu);
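/* kfree_rcu() rather than kfree(): datapath code may still be dereferencing
 * the q_vector under rcu_read_lock(), so the free is deferred past an RCU
 * grace period.
 */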
4709 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4710 * @vsi: the VSI being un-configured
4712 * This frees the memory allocated to the q_vectors and
4713 * deletes references to the NAPI struct.
4715 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4719 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4720 i40e_free_q_vector(vsi, v_idx);
4724 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4725 * @pf: board private structure
4727 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4729 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4730 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4731 pci_disable_msix(pf->pdev);
4732 kfree(pf->msix_entries);
4733 pf->msix_entries = NULL;
4734 kfree(pf->irq_pile);
4735 pf->irq_pile = NULL;
4736 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4737 pci_disable_msi(pf->pdev);
4739 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4743 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4744 * @pf: board private structure
4746 * We go through and clear interrupt specific resources and reset the structure
4747 * to pre-load conditions
4749 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4753 i40e_free_misc_vector(pf);
4755 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4756 I40E_IWARP_IRQ_PILE_ID);
4758 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4759 for (i = 0; i < pf->num_alloc_vsi; i++)
4761 i40e_vsi_free_q_vectors(pf->vsi[i]);
4762 i40e_reset_interrupt_capability(pf);
4766 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4767 * @vsi: the VSI being configured
4769 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4776 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4777 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4779 if (q_vector->rx.ring || q_vector->tx.ring)
4780 napi_enable(&q_vector->napi);
4785 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4786 * @vsi: the VSI being configured
4788 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4795 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4796 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4798 if (q_vector->rx.ring || q_vector->tx.ring)
4799 napi_disable(&q_vector->napi);
4804 * i40e_vsi_close - Shut down a VSI
4805 * @vsi: the vsi to be quelled
4807 static void i40e_vsi_close(struct i40e_vsi *vsi)
4809 struct i40e_pf *pf = vsi->back;
if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
        i40e_down(vsi);
4812 i40e_vsi_free_irq(vsi);
4813 i40e_vsi_free_tx_resources(vsi);
4814 i40e_vsi_free_rx_resources(vsi);
4815 vsi->current_netdev_flags = 0;
4816 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4817 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4818 set_bit(__I40E_CLIENT_RESET, pf->state);
4822 * i40e_quiesce_vsi - Pause a given VSI
4823 * @vsi: the VSI being paused
4825 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4827 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4830 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4831 if (vsi->netdev && netif_running(vsi->netdev))
        vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
else
        i40e_vsi_close(vsi);
4838 * i40e_unquiesce_vsi - Resume a given VSI
4839 * @vsi: the VSI being resumed
4841 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4843 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4846 if (vsi->netdev && netif_running(vsi->netdev))
        vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
        i40e_vsi_open(vsi); /* this clears the DOWN bit */
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
4856 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4860 for (v = 0; v < pf->num_alloc_vsi; v++) {
4862 i40e_quiesce_vsi(pf->vsi[v]);
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
4870 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4874 for (v = 0; v < pf->num_alloc_vsi; v++) {
4876 i40e_unquiesce_vsi(pf->vsi[v]);
4881 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4882 * @vsi: the VSI being configured
4884 * Wait until all queues on a given VSI have been disabled.
4886 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4888 struct i40e_pf *pf = vsi->back;
4891 pf_q = vsi->base_queue;
4892 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4893 /* Check and wait for the Tx queue */
4894 ret = i40e_pf_txq_wait(pf, pf_q, false);
4896 dev_info(&pf->pdev->dev,
4897 "VSI seid %d Tx ring %d disable timeout\n",
4902 if (!i40e_enabled_xdp_vsi(vsi))
4905 /* Check and wait for the XDP Tx queue */
4906 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4909 dev_info(&pf->pdev->dev,
4910 "VSI seid %d XDP Tx ring %d disable timeout\n",
4915 /* Check and wait for the Rx queue */
4916 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4918 dev_info(&pf->pdev->dev,
4919 "VSI seid %d Rx ring %d disable timeout\n",
4928 #ifdef CONFIG_I40E_DCB
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
4933 * This function waits for the queues to be in disabled state for all the
4934 * VSIs that are managed by this PF.
4936 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4940 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4942 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4954 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4955 * @pf: pointer to PF
 * Get the TC map for an iSCSI PF type; the map will include the iSCSI TC
4960 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4962 struct i40e_dcb_app_priority_table app;
4963 struct i40e_hw *hw = &pf->hw;
4964 u8 enabled_tc = 1; /* TC0 is always enabled */
4966 /* Get the iSCSI APP TLV */
4967 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4969 for (i = 0; i < dcbcfg->numapps; i++) {
4970 app = dcbcfg->app[i];
4971 if (app.selector == I40E_APP_SEL_TCPIP &&
4972 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4973 tc = dcbcfg->etscfg.prioritytable[app.priority];
4974 enabled_tc |= BIT(tc);
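/* e.g. an iSCSI APP TLV whose priority maps to TC2 yields
 * enabled_tc = 0x5, i.e. TC0 | TC2
 */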
4983 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4984 * @dcbcfg: the corresponding DCBx configuration structure
4986 * Return the number of TCs from given DCBx configuration
4988 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4990 int i, tc_unused = 0;
4994 /* Scan the ETS Config Priority Table to find
4995 * traffic class enabled for a given priority
4996 * and create a bitmask of enabled TCs
4998 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4999 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
5001 /* Now scan the bitmask to check for
5002 * contiguous TCs starting with TC0
5004 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5005 if (num_tc & BIT(i)) {
5009 pr_err("Non-contiguous TC - Disabling DCB\n");
5017 /* There is always at least TC0 */
5025 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5026 * @dcbcfg: the corresponding DCBx configuration structure
 * Query the given DCBx configuration and return a bitmap of the
 * traffic classes enabled in it
5031 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5033 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5037 for (i = 0; i < num_tc; i++)
5038 enabled_tc |= BIT(i);
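/* e.g. num_tc = 3 yields enabled_tc = 0x7, i.e. TC0, TC1 and TC2 */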
5044 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5045 * @pf: PF being queried
 * Query the current MQPRIO configuration and return a bitmap of the
 * traffic classes enabled
5050 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5052 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5053 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5054 u8 enabled_tc = 1, i;
5056 for (i = 1; i < num_tc; i++)
5057 enabled_tc |= BIT(i);
5062 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5063 * @pf: PF being queried
5065 * Return number of traffic classes enabled for the given PF
5067 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5069 struct i40e_hw *hw = &pf->hw;
5070 u8 i, enabled_tc = 1;
5072 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5074 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5075 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5077 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5078 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5081 /* SFP mode will be enabled for all TCs on port */
5082 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5083 return i40e_dcb_get_num_tc(dcbcfg);
5085 /* MFP mode return count of enabled TCs for this PF */
if (pf->hw.func_caps.iscsi)
        enabled_tc = i40e_get_iscsi_tc_map(pf);
else
        return 1; /* Only TC0 */
5091 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5092 if (enabled_tc & BIT(i))
5099 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
5100 * @pf: PF being queried
5102 * Return a bitmap for enabled traffic classes for this PF.
5104 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5106 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5107 return i40e_mqprio_get_enabled_tc(pf);
5109 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5112 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5113 return I40E_DEFAULT_TRAFFIC_CLASS;
5115 /* SFP mode we want PF to be enabled for all TCs */
5116 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5117 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5119 /* MFP enabled and iSCSI PF type */
5120 if (pf->hw.func_caps.iscsi)
5121 return i40e_get_iscsi_tc_map(pf);
5123 return I40E_DEFAULT_TRAFFIC_CLASS;
5127 * i40e_vsi_get_bw_info - Query VSI BW Information
5128 * @vsi: the VSI being queried
5130 * Returns 0 on success, negative value on failure
5132 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5134 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5135 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5136 struct i40e_pf *pf = vsi->back;
5137 struct i40e_hw *hw = &pf->hw;
5142 /* Get the VSI level BW configuration */
5143 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5145 dev_info(&pf->pdev->dev,
5146 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5147 i40e_stat_str(&pf->hw, ret),
5148 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5152 /* Get the VSI level BW configuration per TC */
5153 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5156 dev_info(&pf->pdev->dev,
5157 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5158 i40e_stat_str(&pf->hw, ret),
5159 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5163 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5164 dev_info(&pf->pdev->dev,
5165 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5166 bw_config.tc_valid_bits,
5167 bw_ets_config.tc_valid_bits);
5168 /* Still continuing */
5171 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5172 vsi->bw_max_quanta = bw_config.max_bw;
5173 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5174 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5175 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5176 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5177 vsi->bw_ets_limit_credits[i] =
5178 le16_to_cpu(bw_ets_config.credits[i]);
5179 /* 3 bits out of 4 for each TC */
5180 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
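/* e.g. for TC2 the quanta live in bits 11:8 of tc_bw_max, and only the
 * low 3 bits of that nibble are kept
 */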
5187 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5188 * @vsi: the VSI being configured
5189 * @enabled_tc: TC bitmap
5190 * @bw_share: BW shared credits per TC
5192 * Returns 0 on success, negative value on failure
5194 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5197 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5198 struct i40e_pf *pf = vsi->back;
5202 /* There is no need to reset BW when mqprio mode is on. */
if (pf->flags & I40E_FLAG_TC_MQPRIO)
        return 0;
5205 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5206 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5208 dev_info(&pf->pdev->dev,
5209 "Failed to reset tx rate for vsi->seid %u\n",
5213 bw_data.tc_valid_bits = enabled_tc;
5214 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5215 bw_data.tc_bw_credits[i] = bw_share[i];
5217 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5219 dev_info(&pf->pdev->dev,
5220 "AQ command Config VSI BW allocation per TC failed = %d\n",
5221 pf->hw.aq.asq_last_status);
5225 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5226 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5232 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5233 * @vsi: the VSI being configured
5234 * @enabled_tc: TC map to be enabled
5237 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5239 struct net_device *netdev = vsi->netdev;
5240 struct i40e_pf *pf = vsi->back;
5241 struct i40e_hw *hw = &pf->hw;
5244 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5250 netdev_reset_tc(netdev);
5254 /* Set up actual enabled TCs on the VSI */
5255 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5258 /* set per TC queues for the VSI */
5259 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
/* Only set TC queues for enabled tcs
 *
 * e.g. For a VSI that has TC0 and TC3 enabled the
 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
 * will set the numtc for netdev as 2 that will be
 * referenced by the netdev layer as TC 0 and 1.
 */
5267 if (vsi->tc_config.enabled_tc & BIT(i))
5268 netdev_set_tc_queue(netdev,
5269 vsi->tc_config.tc_info[i].netdev_tc,
5270 vsi->tc_config.tc_info[i].qcount,
5271 vsi->tc_config.tc_info[i].qoffset);
if (pf->flags & I40E_FLAG_TC_MQPRIO)
        return;
5277 /* Assign UP2TC map for the VSI */
5278 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5279 /* Get the actual TC# for the UP */
5280 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5281 /* Get the mapped netdev TC# for the UP */
5282 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5283 netdev_set_prio_tc_map(netdev, i, netdev_tc);
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5289 * @vsi: the VSI being configured
5290 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5292 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5293 struct i40e_vsi_context *ctxt)
/* copy just the sections touched not the entire info
 * since not all sections are valid as returned by
 * the update vsi params command
 */
5299 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5300 memcpy(&vsi->info.queue_mapping,
5301 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5302 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5303 sizeof(vsi->info.tc_mapping));
5307 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5308 * @vsi: VSI to be configured
5309 * @enabled_tc: TC bitmap
5311 * This configures a particular VSI for TCs that are mapped to the
5312 * given TC bitmap. It uses default bandwidth share for TCs across
5313 * VSIs to configure TC for a particular VSI.
5316 * It is expected that the VSI queues have been quisced before calling
5319 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5321 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5322 struct i40e_pf *pf = vsi->back;
5323 struct i40e_hw *hw = &pf->hw;
5324 struct i40e_vsi_context ctxt;
5328 /* Check if enabled_tc is same as existing or new TCs */
5329 if (vsi->tc_config.enabled_tc == enabled_tc &&
5330 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5333 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5334 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5335 if (enabled_tc & BIT(i))
5339 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5341 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5343 dev_info(&pf->pdev->dev,
5344 "Failed configuring TC map %d for VSI %d\n",
5345 enabled_tc, vsi->seid);
5346 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5349 dev_info(&pf->pdev->dev,
5350 "Failed querying vsi bw info, err %s aq_err %s\n",
5351 i40e_stat_str(hw, ret),
5352 i40e_aq_str(hw, hw->aq.asq_last_status));
5355 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5356 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5359 valid_tc = bw_config.tc_valid_bits;
5360 /* Always enable TC0, no matter what */
5362 dev_info(&pf->pdev->dev,
5363 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5364 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5365 enabled_tc = valid_tc;
5368 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5370 dev_err(&pf->pdev->dev,
5371 "Unable to configure TC map %d for VSI %d\n",
5372 enabled_tc, vsi->seid);
5377 /* Update Queue Pairs Mapping for currently enabled UPs */
5378 ctxt.seid = vsi->seid;
5379 ctxt.pf_num = vsi->back->hw.pf_id;
5381 ctxt.uplink_seid = vsi->uplink_seid;
5382 ctxt.info = vsi->info;
5383 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5384 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5388 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5391 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
5394 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5395 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5396 vsi->num_queue_pairs);
5397 ret = i40e_vsi_config_rss(vsi);
5399 dev_info(&vsi->back->pdev->dev,
5400 "Failed to reconfig rss for num_queues\n");
5403 vsi->reconfig_rss = false;
5405 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5406 ctxt.info.valid_sections |=
5407 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5408 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5411 /* Update the VSI after updating the VSI queue-mapping
5414 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5416 dev_info(&pf->pdev->dev,
5417 "Update vsi tc config failed, err %s aq_err %s\n",
5418 i40e_stat_str(hw, ret),
5419 i40e_aq_str(hw, hw->aq.asq_last_status));
5422 /* update the local VSI info with updated queue map */
5423 i40e_vsi_update_queue_map(vsi, &ctxt);
5424 vsi->info.valid_sections = 0;
5426 /* Update current VSI BW information */
5427 ret = i40e_vsi_get_bw_info(vsi);
5429 dev_info(&pf->pdev->dev,
5430 "Failed updating vsi bw info, err %s aq_err %s\n",
5431 i40e_stat_str(hw, ret),
5432 i40e_aq_str(hw, hw->aq.asq_last_status));
5436 /* Update the netdev TC setup */
5437 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5443 * i40e_get_link_speed - Returns link speed for the interface
5444 * @vsi: VSI to be configured
5447 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5449 struct i40e_pf *pf = vsi->back;
switch (pf->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB: return 40000;
case I40E_LINK_SPEED_25GB: return 25000;
case I40E_LINK_SPEED_20GB: return 20000;
case I40E_LINK_SPEED_10GB: return 10000;
case I40E_LINK_SPEED_1GB:  return 1000;
default:                   return -EINVAL;
}
5468 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5469 * @vsi: VSI to be configured
5470 * @seid: seid of the channel/VSI
5471 * @max_tx_rate: max TX rate to be configured as BW limit
5473 * Helper function to set BW limit for a given VSI
5475 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5477 struct i40e_pf *pf = vsi->back;
5482 speed = i40e_get_link_speed(vsi);
5483 if (max_tx_rate > speed) {
5484 dev_err(&pf->pdev->dev,
5485 "Invalid max tx rate %llu specified for VSI seid %d.",
if (max_tx_rate && max_tx_rate < 50) {
        dev_warn(&pf->pdev->dev,
                 "Setting max tx rate to minimum usable value of 50Mbps.\n");
        max_tx_rate = 50;
}
5495 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5496 credits = max_tx_rate;
5497 do_div(credits, I40E_BW_CREDIT_DIVISOR);
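/* e.g. a 600 Mbps cap becomes 600 / 50 = 12 scheduler credits */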
5498 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5499 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5501 dev_err(&pf->pdev->dev,
5502 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5503 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5504 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5509 * i40e_remove_queue_channels - Remove queue channels for the TCs
5510 * @vsi: VSI to be configured
5512 * Remove queue channels for the TCs
5514 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5516 enum i40e_admin_queue_err last_aq_status;
5517 struct i40e_cloud_filter *cfilter;
5518 struct i40e_channel *ch, *ch_tmp;
5519 struct i40e_pf *pf = vsi->back;
5520 struct hlist_node *node;
5523 /* Reset rss size that was stored when reconfiguring rss for
5524 * channel VSIs with non-power-of-2 queue count.
5526 vsi->current_rss_size = 0;
5528 /* perform cleanup for channels if they exist */
5529 if (list_empty(&vsi->ch_list))
5532 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5533 struct i40e_vsi *p_vsi;
5535 list_del(&ch->list);
5536 p_vsi = ch->parent_vsi;
5537 if (!p_vsi || !ch->initialized) {
5541 /* Reset queue contexts */
5542 for (i = 0; i < ch->num_queue_pairs; i++) {
5543 struct i40e_ring *tx_ring, *rx_ring;
5546 pf_q = ch->base_queue + i;
tx_ring = vsi->tx_rings[pf_q];
tx_ring->ch = NULL;

rx_ring = vsi->rx_rings[pf_q];
rx_ring->ch = NULL;
5554 /* Reset BW configured for this VSI via mqprio */
5555 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5557 dev_info(&vsi->back->pdev->dev,
5558 "Failed to reset tx rate for ch->seid %u\n",
5561 /* delete cloud filters associated with this channel */
5562 hlist_for_each_entry_safe(cfilter, node,
5563 &pf->cloud_filter_list, cloud_node) {
5564 if (cfilter->seid != ch->seid)
5567 hash_del(&cfilter->cloud_node);
5568 if (cfilter->dst_port)
5569 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5573 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5575 last_aq_status = pf->hw.aq.asq_last_status;
5577 dev_info(&pf->pdev->dev,
5578 "Failed to delete cloud filter, err %s aq_err %s\n",
5579 i40e_stat_str(&pf->hw, ret),
5580 i40e_aq_str(&pf->hw, last_aq_status));
5584 /* delete VSI from FW */
5585 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5588 dev_err(&vsi->back->pdev->dev,
5589 "unable to remove channel (%d) for parent VSI(%d)\n",
5590 ch->seid, p_vsi->seid);
5593 INIT_LIST_HEAD(&vsi->ch_list);
 * i40e_is_any_channel - check whether any channel exists
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true if at least one initialized channel exists for the given VSI,
 * false otherwise
5602 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5604 struct i40e_channel *ch, *ch_tmp;
5606 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
if (ch->initialized)
        return true;
}

return false;
5615 * i40e_get_max_queues_for_channel
5616 * @vsi: ptr to VSI to which channels are associated with
5618 * Helper function which returns max value among the queue counts set on the
5619 * channels/TCs created.
5621 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5623 struct i40e_channel *ch, *ch_tmp;
5626 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5627 if (!ch->initialized)
5629 if (ch->num_queue_pairs > max)
5630 max = ch->num_queue_pairs;
 * i40e_validate_num_queues - validate num_queues w.r.t. channels
5638 * @pf: ptr to PF device
5639 * @num_queues: number of queues
5640 * @vsi: the parent VSI
 * @reconfig_rss: indicates whether RSS should be reconfigured or not
5643 * This function validates number of queues in the context of new channel
5644 * which is being established and determines if RSS should be reconfigured
5645 * or not for parent VSI.
5647 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5648 struct i40e_vsi *vsi, bool *reconfig_rss)
5655 *reconfig_rss = false;
5656 if (vsi->current_rss_size) {
5657 if (num_queues > vsi->current_rss_size) {
5658 dev_dbg(&pf->pdev->dev,
5659 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5660 num_queues, vsi->current_rss_size);
5662 } else if ((num_queues < vsi->current_rss_size) &&
5663 (!is_power_of_2(num_queues))) {
5664 dev_dbg(&pf->pdev->dev,
5665 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5666 num_queues, vsi->current_rss_size);
5671 if (!is_power_of_2(num_queues)) {
/* Find the max num_queues configured for any existing channel;
 * if channels exist, then enforce 'num_queues' to be at least the
 * max ever queues configured for a channel.
 */
5677 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5678 if (num_queues < max_ch_queues) {
5679 dev_dbg(&pf->pdev->dev,
5680 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5681 num_queues, max_ch_queues);
5684 *reconfig_rss = true;
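/* Putting the rules above together (illustrative numbers only): with no
 * current_rss_size recorded, a non-power-of-2 count such as 6 is accepted
 * but flags an RSS reconfig; once current_rss_size is 8, requesting 5
 * (smaller and not a power of 2) is rejected outright.
 */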
5691 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5692 * @vsi: the VSI being setup
5693 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5695 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5697 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5699 struct i40e_pf *pf = vsi->back;
5700 u8 seed[I40E_HKEY_ARRAY_SIZE];
5701 struct i40e_hw *hw = &pf->hw;
5709 if (rss_size > vsi->rss_size)
5712 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5713 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
/* Ignore any user-configured LUT; build a default one for the new size */
5718 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5720 /* Use user configured hash key if there is one, otherwise
5723 if (vsi->rss_hkey_user)
5724 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5726 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
if (ret) {
        dev_info(&pf->pdev->dev,
                 "Cannot set RSS lut, err %s aq_err %s\n",
                 i40e_stat_str(hw, ret),
                 i40e_aq_str(hw, hw->aq.asq_last_status));
        kfree(lut);
        return ret;
}
kfree(lut);

/* Do the update w.r.t. storing rss_size */
5740 if (!vsi->orig_rss_size)
5741 vsi->orig_rss_size = vsi->rss_size;
5742 vsi->current_rss_size = local_rss_size;
5748 * i40e_channel_setup_queue_map - Setup a channel queue map
5749 * @pf: ptr to PF device
5750 * @vsi: the VSI being setup
5751 * @ctxt: VSI context structure
5752 * @ch: ptr to channel structure
5754 * Setup queue map for a specific channel
5756 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5757 struct i40e_vsi_context *ctxt,
5758 struct i40e_channel *ch)
5760 u16 qcount, qmap, sections = 0;
5764 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5765 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5767 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5768 ch->num_queue_pairs = qcount;
5770 /* find the next higher power-of-2 of num queue pairs */
5771 pow = ilog2(qcount);
if (!is_power_of_2(qcount))
        pow++;
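/* e.g. qcount = 3 gives pow = 2, i.e. a rounded-up region of 4 queues */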
5775 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5776 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5778 /* Setup queue TC[0].qmap for given VSI context */
5779 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5781 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5782 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5783 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5784 ctxt->info.valid_sections |= cpu_to_le16(sections);
5788 * i40e_add_channel - add a channel by adding VSI
5789 * @pf: ptr to PF device
5790 * @uplink_seid: underlying HW switching element (VEB) ID
5791 * @ch: ptr to channel structure
5793 * Add a channel (VSI) using add_vsi and queue_map
5795 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5796 struct i40e_channel *ch)
5798 struct i40e_hw *hw = &pf->hw;
5799 struct i40e_vsi_context ctxt;
5800 u8 enabled_tc = 0x1; /* TC0 enabled */
5803 if (ch->type != I40E_VSI_VMDQ2) {
5804 dev_info(&pf->pdev->dev,
5805 "add new vsi failed, ch->type %d\n", ch->type);
5809 memset(&ctxt, 0, sizeof(ctxt));
5810 ctxt.pf_num = hw->pf_id;
5812 ctxt.uplink_seid = uplink_seid;
5813 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5814 if (ch->type == I40E_VSI_VMDQ2)
5815 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5817 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5818 ctxt.info.valid_sections |=
5819 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5820 ctxt.info.switch_id =
5821 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5824 /* Set queue map for a given VSI context */
5825 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5827 /* Now time to create VSI */
5828 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5830 dev_info(&pf->pdev->dev,
5831 "add new vsi failed, err %s aq_err %s\n",
5832 i40e_stat_str(&pf->hw, ret),
5833 i40e_aq_str(&pf->hw,
5834 pf->hw.aq.asq_last_status));
5838 /* Success, update channel */
5839 ch->enabled_tc = enabled_tc;
5840 ch->seid = ctxt.seid;
5841 ch->vsi_number = ctxt.vsi_number;
5842 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
/* copy just the sections touched not the entire info
 * since not all sections are valid as returned by
 * the add vsi command
 */
5848 ch->info.mapping_flags = ctxt.info.mapping_flags;
5849 memcpy(&ch->info.queue_mapping,
5850 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5851 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5852 sizeof(ctxt.info.tc_mapping));
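/**
 * i40e_channel_config_bw - configure BW for the channel
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/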
5857 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5860 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5864 bw_data.tc_valid_bits = ch->enabled_tc;
5865 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5866 bw_data.tc_bw_credits[i] = bw_share[i];
5868 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5871 dev_info(&vsi->back->pdev->dev,
5872 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5873 vsi->back->hw.aq.asq_last_status, ch->seid);
5877 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5878 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5884 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5885 * @pf: ptr to PF device
5886 * @vsi: the VSI being setup
5887 * @ch: ptr to channel structure
 * Configure TX rings associated with the channel (VSI) since the queues are
 * being moved from the main VSI to the channel VSI
5892 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5893 struct i40e_vsi *vsi,
5894 struct i40e_channel *ch)
5898 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5900 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5901 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5902 if (ch->enabled_tc & BIT(i))
5906 /* configure BW for new VSI */
5907 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5909 dev_info(&vsi->back->pdev->dev,
5910 "Failed configuring TC map %d for channel (seid %u)\n",
5911 ch->enabled_tc, ch->seid);
5915 for (i = 0; i < ch->num_queue_pairs; i++) {
5916 struct i40e_ring *tx_ring, *rx_ring;
5919 pf_q = ch->base_queue + i;
/* Get to TX ring ptr of main VSI, for re-setup TX queue
 * context
 */
tx_ring = vsi->tx_rings[pf_q];
tx_ring->ch = ch;

/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
rx_ring->ch = ch;
5936 * i40e_setup_hw_channel - setup new channel
5937 * @pf: ptr to PF device
5938 * @vsi: the VSI being setup
5939 * @ch: ptr to channel structure
5940 * @uplink_seid: underlying HW switching element (VEB) ID
5941 * @type: type of channel to be created (VMDq2/VF)
5943 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5944 * and configures TX rings accordingly
5946 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5947 struct i40e_vsi *vsi,
5948 struct i40e_channel *ch,
5949 u16 uplink_seid, u8 type)
5953 ch->initialized = false;
5954 ch->base_queue = vsi->next_base_queue;
5957 /* Proceed with creation of channel (VMDq2) VSI */
5958 ret = i40e_add_channel(pf, uplink_seid, ch);
5960 dev_info(&pf->pdev->dev,
5961 "failed to add_channel using uplink_seid %u\n",
5966 /* Mark the successful creation of channel */
5967 ch->initialized = true;
5969 /* Reconfigure TX queues using QTX_CTL register */
5970 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
5972 dev_info(&pf->pdev->dev,
5973 "failed to configure TX rings for channel %u\n",
5978 /* update 'next_base_queue' */
5979 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
5980 dev_dbg(&pf->pdev->dev,
5981 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
5982 ch->seid, ch->vsi_number, ch->stat_counter_idx,
5983 ch->num_queue_pairs,
5984 vsi->next_base_queue);
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: pointer to the parent VSI
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on the parent VSI type (VMDq2 for the
 * main VSI) and the uplink switching element derived from it
5998 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
5999 struct i40e_channel *ch)
6005 if (vsi->type == I40E_VSI_MAIN) {
6006 vsi_type = I40E_VSI_VMDQ2;
6008 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
6013 /* underlying switching element */
6014 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6016 /* create channel (VSI), configure TX rings */
6017 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
6019 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
return ch->initialized;
6027 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6028 * @vsi: ptr to VSI which has PF backing
 * Sets up the switch mode if it needs to be changed, validating the
 * request against the modes the device allows
6033 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6036 struct i40e_pf *pf = vsi->back;
6037 struct i40e_hw *hw = &pf->hw;
6040 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6044 if (hw->dev_caps.switch_mode) {
6045 /* if switch mode is set, support mode2 (non-tunneled for
6046 * cloud filter) for now
6048 u32 switch_mode = hw->dev_caps.switch_mode &
6049 I40E_SWITCH_MODE_MASK;
6050 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6051 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6053 dev_err(&pf->pdev->dev,
6054 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6055 hw->dev_caps.switch_mode);
6060 /* Set Bit 7 to be valid */
6061 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6063 /* Set L4type for TCP support */
6064 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6066 /* Set cloud filter mode */
6067 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6069 /* Prep mode field for set_switch_config */
6070 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6071 pf->last_sw_conf_valid_flags,
6073 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6074 dev_err(&pf->pdev->dev,
6075 "couldn't set switch config bits, err %s aq_err %s\n",
6076 i40e_stat_str(hw, ret),
6078 hw->aq.asq_last_status));
6084 * i40e_create_queue_channel - function to create channel
6085 * @vsi: VSI to be configured
6086 * @ch: ptr to channel (it contains channel specific params)
 * This function creates a channel (VSI) using the num_queues specified by the
 * user and reconfigures RSS if needed.
6091 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6092 struct i40e_channel *ch)
6094 struct i40e_pf *pf = vsi->back;
6101 if (!ch->num_queue_pairs) {
6102 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6103 ch->num_queue_pairs);
6107 /* validate user requested num_queues for channel */
6108 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6111 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6112 ch->num_queue_pairs);
/* By default we are in VEPA mode; if this is the first VF/VMDq
 * VSI to be added, switch to VEB mode.
 */
6119 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6120 (!i40e_is_any_channel(vsi))) {
6121 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6122 dev_dbg(&pf->pdev->dev,
6123 "Failed to create channel. Override queues (%u) not power of 2\n",
6124 vsi->tc_config.tc_info[0].qcount);
6128 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6129 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6131 if (vsi->type == I40E_VSI_MAIN) {
6132 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6133 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6136 i40e_do_reset_safe(pf,
6137 I40E_PF_RESET_FLAG);
/* from now on, for the main VSI the number of queues will be
 * the value of TC0's queue count
 */
6145 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6146 * it should be more than num_queues
6148 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6149 dev_dbg(&pf->pdev->dev,
6150 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6151 vsi->cnt_q_avail, ch->num_queue_pairs);
6155 /* reconfig_rss only if vsi type is MAIN_VSI */
6156 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6157 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6159 dev_info(&pf->pdev->dev,
6160 "Error: unable to reconfig rss for num_queues (%u)\n",
6161 ch->num_queue_pairs);
6166 if (!i40e_setup_channel(pf, vsi, ch)) {
6167 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6171 dev_info(&pf->pdev->dev,
6172 "Setup channel (id:%u) utilizing num_queues %d\n",
6173 ch->seid, ch->num_queue_pairs);
6175 /* configure VSI for BW limit */
6176 if (ch->max_tx_rate) {
6177 u64 credits = ch->max_tx_rate;
6179 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6182 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6183 dev_dbg(&pf->pdev->dev,
6184 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6190 /* in case of VF, this will be main SRIOV VSI */
6191 ch->parent_vsi = vsi;
6193 /* and update main_vsi's count for queue_available to use */
6194 vsi->cnt_q_avail -= ch->num_queue_pairs;
6200 * i40e_configure_queue_channels - Add queue channel for the given TCs
6201 * @vsi: VSI to be configured
6203 * Configures queue channel mapping to the given TCs
6205 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6207 struct i40e_channel *ch;
6211 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6212 vsi->tc_seid_map[0] = vsi->seid;
6213 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6214 if (vsi->tc_config.enabled_tc & BIT(i)) {
6215 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6221 INIT_LIST_HEAD(&ch->list);
6222 ch->num_queue_pairs =
6223 vsi->tc_config.tc_info[i].qcount;
6225 vsi->tc_config.tc_info[i].qoffset;
/* Bandwidth limit through tc interface is in bytes/s,
 * change to Mbit/s
 */
6230 max_rate = vsi->mqprio_qopt.max_rate[i];
6231 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6232 ch->max_tx_rate = max_rate;
6234 list_add_tail(&ch->list, &vsi->ch_list);
6236 ret = i40e_create_queue_channel(vsi, ch);
6238 dev_err(&vsi->back->pdev->dev,
6239 "Failed creating queue channel with TC%d: queues %d\n",
6240 i, ch->num_queue_pairs);
6243 vsi->tc_seid_map[i] = ch->seid;
6249 i40e_remove_queue_channels(vsi);
6254 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: the VEB being configured
 * @enabled_tc: TC bitmap
6258 * Configures given TC bitmap for VEB (switching) element
6260 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6262 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6263 struct i40e_pf *pf = veb->pf;
6267 /* No TCs or already enabled TCs just return */
6268 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6271 bw_data.tc_valid_bits = enabled_tc;
6272 /* bw_data.absolute_credits is not set (relative) */
6274 /* Enable ETS TCs with equal BW Share for now */
6275 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6276 if (enabled_tc & BIT(i))
6277 bw_data.tc_bw_share_credits[i] = 1;
6280 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6283 dev_info(&pf->pdev->dev,
6284 "VEB bw config failed, err %s aq_err %s\n",
6285 i40e_stat_str(&pf->hw, ret),
6286 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6290 /* Update the BW information */
6291 ret = i40e_veb_get_bw_info(veb);
6293 dev_info(&pf->pdev->dev,
6294 "Failed getting veb bw config, err %s aq_err %s\n",
6295 i40e_stat_str(&pf->hw, ret),
6296 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6303 #ifdef CONFIG_I40E_DCB
6305 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: pointer to the PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function
6312 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6318 /* Enable the TCs available on PF to all VEBs */
6319 tc_map = i40e_pf_get_tc_map(pf);
6320 for (v = 0; v < I40E_MAX_VEB; v++) {
6323 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6325 dev_info(&pf->pdev->dev,
6326 "Failed configuring TC for VEB seid=%d\n",
6328 /* Will try to configure as many components */
6332 /* Update each VSI */
6333 for (v = 0; v < pf->num_alloc_vsi; v++) {
6337 /* - Enable all TCs for the LAN VSI
6338 * - For all others keep them at TC0 for now
6340 if (v == pf->lan_vsi)
6341 tc_map = i40e_pf_get_tc_map(pf);
6343 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6345 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6347 dev_info(&pf->pdev->dev,
6348 "Failed configuring TC for VSI seid=%d\n",
6350 /* Will try to configure as many components */
6352 /* Re-configure VSI vectors based on updated TC map */
6353 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6354 if (pf->vsi[v]->netdev)
6355 i40e_dcbnl_set_all(pf->vsi[v]);
6361 * i40e_resume_port_tx - Resume port Tx
 * @pf: pointer to the PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume
6367 static int i40e_resume_port_tx(struct i40e_pf *pf)
6369 struct i40e_hw *hw = &pf->hw;
6372 ret = i40e_aq_resume_port_tx(hw, NULL);
6374 dev_info(&pf->pdev->dev,
6375 "Resume Port Tx failed, err %s aq_err %s\n",
6376 i40e_stat_str(&pf->hw, ret),
6377 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6378 /* Schedule PF reset to recover */
6379 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6380 i40e_service_event_schedule(pf);
6387 * i40e_init_pf_dcb - Initialize DCB configuration
6388 * @pf: PF being configured
6390 * Query the current DCB configuration and cache it
6391 * in the hardware structure
6393 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6395 struct i40e_hw *hw = &pf->hw;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
 * Also do not enable DCBx if the FW LLDP agent is disabled.
 */
6401 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6402 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
6405 /* Get the initial DCB configuration */
6406 err = i40e_init_dcb(hw);
6408 /* Device/Function is not DCBX capable */
6409 if ((!hw->func_caps.dcb) ||
6410 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6411 dev_info(&pf->pdev->dev,
6412 "DCBX offload is not supported or is disabled for this PF.\n");
6414 /* When status is not DISABLED then DCBX is running in FW */
6415 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6416 DCB_CAP_DCBX_VER_IEEE;
6418 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6419 /* Enable DCB tagging only when more than one TC
6420 * or explicitly disable if only one TC
6422 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6423 pf->flags |= I40E_FLAG_DCB_ENABLED;
6425 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6426 dev_dbg(&pf->pdev->dev,
6427 "DCBX offload is supported for this PF.\n");
6429 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6430 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6431 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6433 dev_info(&pf->pdev->dev,
6434 "Query for DCB configuration failed, err %s aq_err %s\n",
6435 i40e_stat_str(&pf->hw, err),
6436 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6442 #endif /* CONFIG_I40E_DCB */
6443 #define SPEED_SIZE 14
6446 * i40e_print_link_message - print link up or down
6447 * @vsi: the VSI for which link needs a message
6448 * @isup: true if link is up, false otherwise
6450 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6452 enum i40e_aq_link_speed new_speed;
6453 struct i40e_pf *pf = vsi->back;
6454 char *speed = "Unknown";
6455 char *fc = "Unknown";
6461 new_speed = pf->hw.phy.link_info.link_speed;
6463 new_speed = I40E_LINK_SPEED_UNKNOWN;
6465 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6467 vsi->current_isup = isup;
6468 vsi->current_speed = new_speed;
6470 netdev_info(vsi->netdev, "NIC Link is Down\n");
6474 /* Warn user if link speed on NPAR enabled partition is not at least 10Gbps */
6477 if (pf->hw.func_caps.npar_enable &&
6478 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6479 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6480 netdev_warn(vsi->netdev,
6481 "The partition detected link speed that is less than 10Gbps\n");
6483 switch (pf->hw.phy.link_info.link_speed) {
6484 case I40E_LINK_SPEED_40GB:
6487 case I40E_LINK_SPEED_20GB:
6490 case I40E_LINK_SPEED_25GB:
6493 case I40E_LINK_SPEED_10GB:
6496 case I40E_LINK_SPEED_1GB:
6499 case I40E_LINK_SPEED_100MB:
6506 switch (pf->hw.fc.current_mode) {
6510 case I40E_FC_TX_PAUSE:
6513 case I40E_FC_RX_PAUSE:
6521 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6522 req_fec = ", Requested FEC: None";
6523 fec = ", FEC: None";
6524 an = ", Autoneg: False";
6526 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6527 an = ", Autoneg: True";
6529 if (pf->hw.phy.link_info.fec_info &
6530 I40E_AQ_CONFIG_FEC_KR_ENA)
6531 fec = ", FEC: CL74 FC-FEC/BASE-R";
6532 else if (pf->hw.phy.link_info.fec_info &
6533 I40E_AQ_CONFIG_FEC_RS_ENA)
6534 fec = ", FEC: CL108 RS-FEC";
6536 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6537 * both RS and FC are requested
6539 if (vsi->back->hw.phy.link_info.req_fec_info &
6540 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6541 if (vsi->back->hw.phy.link_info.req_fec_info &
6542 I40E_AQ_REQUEST_FEC_RS)
6543 req_fec = ", Requested FEC: CL108 RS-FEC";
6545 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6549 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6550 speed, req_fec, fec, an, fc);
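/* Example of the resulting log line, with illustrative values only:
 *   "NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: CL74
 *   FC-FEC/BASE-R, FEC: CL74 FC-FEC/BASE-R, Autoneg: True,
 *   Flow Control: None"
 */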
6554 * i40e_up_complete - Finish the last steps of bringing up a connection
6555 * @vsi: the VSI being configured
6557 static int i40e_up_complete(struct i40e_vsi *vsi)
6559 struct i40e_pf *pf = vsi->back;
6562 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6563 i40e_vsi_configure_msix(vsi);
6565 i40e_configure_msi_and_legacy(vsi);
6568 err = i40e_vsi_start_rings(vsi);
6572 clear_bit(__I40E_VSI_DOWN, vsi->state);
6573 i40e_napi_enable_all(vsi);
6574 i40e_vsi_enable_irq(vsi);
6576 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6578 i40e_print_link_message(vsi, true);
6579 netif_tx_start_all_queues(vsi->netdev);
6580 netif_carrier_on(vsi->netdev);
6583 /* replay FDIR SB filters */
6584 if (vsi->type == I40E_VSI_FDIR) {
6585 /* reset fd counters */
6588 i40e_fdir_filter_restore(vsi);
6591 /* On the next run of the service_task, notify any clients of the newly opened netdev */
6594 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6595 i40e_service_event_schedule(pf);
6601 * i40e_vsi_reinit_locked - Reset the VSI
6602 * @vsi: the VSI being configured
6604 * Rebuild the ring structs after some configuration
6605 * has changed, e.g. MTU size.
6607 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6609 struct i40e_pf *pf = vsi->back;
6611 WARN_ON(in_interrupt());
6612 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6613 usleep_range(1000, 2000);
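/* Note (illustrative): __I40E_CONFIG_BUSY acts as a simple sleeping
 * lock here; each contender polls in 1-2 ms steps until the current
 * owner clears the bit again below.
 */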
6617 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6621 * i40e_up - Bring the connection back up after being down
6622 * @vsi: the VSI being configured
6624 int i40e_up(struct i40e_vsi *vsi)
6628 err = i40e_vsi_configure(vsi);
6630 err = i40e_up_complete(vsi);
6636 * i40e_force_link_state - Force the link status
6637 * @pf: board private structure
6638 * @is_up: whether the link state should be forced up or down
6640 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6642 struct i40e_aq_get_phy_abilities_resp abilities;
6643 struct i40e_aq_set_phy_config config = {0};
6644 struct i40e_hw *hw = &pf->hw;
6649 /* Card might've been put in an unstable state by other drivers
6650 * and applications, which causes incorrect speed values to be
6651 * set on startup. In order to clear speed registers, we call
6652 * get_phy_capabilities twice, once to get initial state of
6653 * available speeds, and once to get current PHY config.
6655 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6658 dev_err(&pf->pdev->dev,
6659 "failed to get phy cap., ret = %s last_status = %s\n",
6660 i40e_stat_str(hw, err),
6661 i40e_aq_str(hw, hw->aq.asq_last_status));
6664 speed = abilities.link_speed;
6666 /* Get the current phy config */
6667 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6670 dev_err(&pf->pdev->dev,
6671 "failed to get phy cap., ret = %s last_status = %s\n",
6672 i40e_stat_str(hw, err),
6673 i40e_aq_str(hw, hw->aq.asq_last_status));
6677 /* If link needs to go up, but was not forced to go down,
6678 * and its speed values are OK, no need for a flap
6680 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6681 return I40E_SUCCESS;
6683 /* To force link we need to set bits for all supported PHY types,
6684 * but there are now more than 32, so we need to split the bitmap
6685 * across two fields.
6687 mask = I40E_PHY_TYPES_BITMASK;
6688 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6689 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
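/* Illustrative example (hypothetical mask value): for a 64-bit mask
 * of 0x000000F0FFFFFFFFULL, forcing link up yields
 * phy_type = 0xFFFFFFFF and phy_type_ext = 0xF0, while forcing it
 * down clears both fields.
 */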
6690 /* Copy the old settings, except for phy_type */
6691 config.abilities = abilities.abilities;
6692 if (abilities.link_speed != 0)
6693 config.link_speed = abilities.link_speed;
6695 config.link_speed = speed;
6696 config.eee_capability = abilities.eee_capability;
6697 config.eeer = abilities.eeer_val;
6698 config.low_power_ctrl = abilities.d3_lpan;
6699 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6700 I40E_AQ_PHY_FEC_CONFIG_MASK;
6701 err = i40e_aq_set_phy_config(hw, &config, NULL);
6704 dev_err(&pf->pdev->dev,
6705 "set phy config ret = %s last_status = %s\n",
6706 i40e_stat_str(&pf->hw, err),
6707 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6711 /* Update the link info */
6712 err = i40e_update_link_info(hw);
6714 /* Wait a little bit (on 40G cards it sometimes takes a really
6715 * long time for link to come back from the atomic reset)
6719 i40e_update_link_info(hw);
6722 i40e_aq_set_link_restart_an(hw, true, NULL);
6724 return I40E_SUCCESS;
6728 * i40e_down - Shutdown the connection processing
6729 * @vsi: the VSI being stopped
6731 void i40e_down(struct i40e_vsi *vsi)
6735 /* It is assumed that the caller of this function
6736 * sets the vsi->state __I40E_VSI_DOWN bit.
6739 netif_carrier_off(vsi->netdev);
6740 netif_tx_disable(vsi->netdev);
6742 i40e_vsi_disable_irq(vsi);
6743 i40e_vsi_stop_rings(vsi);
6744 if (vsi->type == I40E_VSI_MAIN &&
6745 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6746 i40e_force_link_state(vsi->back, false);
6747 i40e_napi_disable_all(vsi);
6749 for (i = 0; i < vsi->num_queue_pairs; i++) {
6750 i40e_clean_tx_ring(vsi->tx_rings[i]);
6751 if (i40e_enabled_xdp_vsi(vsi)) {
6752 /* Make sure that in-progress ndo_xdp_xmit
6753 * calls are completed.
6756 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6758 i40e_clean_rx_ring(vsi->rx_rings[i]);
6764 * i40e_validate_mqprio_qopt - validate queue mapping info
6765 * @vsi: the VSI being configured
6766 * @mqprio_qopt: queue parameters
6768 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6769 struct tc_mqprio_qopt_offload *mqprio_qopt)
6771 u64 sum_max_rate = 0;
6775 if (mqprio_qopt->qopt.offset[0] != 0 ||
6776 mqprio_qopt->qopt.num_tc < 1 ||
6777 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6779 for (i = 0; ; i++) {
6780 if (!mqprio_qopt->qopt.count[i])
6782 if (mqprio_qopt->min_rate[i]) {
6783 dev_err(&vsi->back->pdev->dev,
6784 "Invalid min tx rate (greater than 0) specified\n");
6787 max_rate = mqprio_qopt->max_rate[i];
6788 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6789 sum_max_rate += max_rate;
6791 if (i >= mqprio_qopt->qopt.num_tc - 1)
6793 if (mqprio_qopt->qopt.offset[i + 1] !=
6794 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6797 if (vsi->num_queue_pairs <
6798 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6801 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6802 dev_err(&vsi->back->pdev->dev,
6803 "Invalid max tx rate specified\n");
6810 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6811 * @vsi: the VSI being configured
6813 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6818 /* Only TC0 is enabled */
6819 vsi->tc_config.numtc = 1;
6820 vsi->tc_config.enabled_tc = 1;
6821 qcount = min_t(int, vsi->alloc_queue_pairs,
6822 i40e_pf_get_max_q_per_tc(vsi->back));
6823 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6824 /* For the TC that is not enabled set the offset to the default
6825 * queue and allocate one queue for the given TC.
6827 vsi->tc_config.tc_info[i].qoffset = 0;
6829 vsi->tc_config.tc_info[i].qcount = qcount;
6831 vsi->tc_config.tc_info[i].qcount = 1;
6832 vsi->tc_config.tc_info[i].netdev_tc = 0;
6837 * i40e_setup_tc - configure multiple traffic classes
6838 * @netdev: net device to configure
6839 * @type_data: tc offload data
6841 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6843 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6844 struct i40e_netdev_priv *np = netdev_priv(netdev);
6845 struct i40e_vsi *vsi = np->vsi;
6846 struct i40e_pf *pf = vsi->back;
6847 u8 enabled_tc = 0, num_tc, hw;
6848 bool need_reset = false;
6853 num_tc = mqprio_qopt->qopt.num_tc;
6854 hw = mqprio_qopt->qopt.hw;
6855 mode = mqprio_qopt->mode;
6857 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6858 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6862 /* Check if MFP enabled */
6863 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6865 "Configuring TC not supported in MFP mode\n");
6869 case TC_MQPRIO_MODE_DCB:
6870 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6872 /* Check if DCB enabled to continue */
6873 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6875 "DCB is not enabled for adapter\n");
6879 /* Check whether tc count is within enabled limit */
6880 if (num_tc > i40e_pf_get_num_tc(pf)) {
6882 "TC count greater than enabled on link for adapter\n");
6886 case TC_MQPRIO_MODE_CHANNEL:
6887 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6889 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6892 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6894 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6897 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6898 sizeof(*mqprio_qopt));
6899 pf->flags |= I40E_FLAG_TC_MQPRIO;
6900 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6907 /* Generate TC map for number of tc requested */
6908 for (i = 0; i < num_tc; i++)
6909 enabled_tc |= BIT(i);
6911 /* Requesting same TC configuration as already enabled */
6912 if (enabled_tc == vsi->tc_config.enabled_tc &&
6913 mode != TC_MQPRIO_MODE_CHANNEL)
6916 /* Quiesce VSI queues */
6917 i40e_quiesce_vsi(vsi);
6919 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6920 i40e_remove_queue_channels(vsi);
6922 /* Configure VSI for enabled TCs */
6923 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6925 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6931 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6932 if (vsi->mqprio_qopt.max_rate[0]) {
6933 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
6935 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6936 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6938 u64 credits = max_tx_rate;
6940 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6941 dev_dbg(&vsi->back->pdev->dev,
6942 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6951 ret = i40e_configure_queue_channels(vsi);
6954 "Failed configuring queue channels\n");
6961 /* Reset the configuration data to defaults, only TC0 is enabled */
6963 i40e_vsi_set_default_tc_config(vsi);
6968 i40e_unquiesce_vsi(vsi);
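/* Illustrative userspace trigger (assumed interface name "eth0"):
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *      hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit
 * reaches this handler with TC_MQPRIO_MODE_CHANNEL set.
 */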
6973 * i40e_set_cld_element - sets cloud filter element data
6974 * @filter: cloud filter rule
6975 * @cld: ptr to cloud filter element data
6977 * This is a helper function to copy data into the cloud filter element
6980 i40e_set_cld_element(struct i40e_cloud_filter *filter,
6981 struct i40e_aqc_cloud_filters_element_data *cld)
6986 memset(cld, 0, sizeof(*cld));
6987 ether_addr_copy(cld->outer_mac, filter->dst_mac);
6988 ether_addr_copy(cld->inner_mac, filter->src_mac);
6990 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
6993 if (filter->n_proto == ETH_P_IPV6) {
6994 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
6995 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
6997 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
6998 ipa = cpu_to_le32(ipa);
6999 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
7002 ipa = be32_to_cpu(filter->dst_ipv4);
7003 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
7006 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
7008 /* tenant_id is not supported by FW now, once the support is enabled
7009 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
7011 if (filter->tenant_id)
7016 * i40e_add_del_cloud_filter - Add/del cloud filter
7017 * @vsi: pointer to VSI
7018 * @filter: cloud filter rule
7019 * @add: if true, add, if false, delete
7021 * Add or delete a cloud filter for a specific flow spec.
7022 * Returns 0 if the filter was successfully added.
7024 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
7025 struct i40e_cloud_filter *filter, bool add)
7027 struct i40e_aqc_cloud_filters_element_data cld_filter;
7028 struct i40e_pf *pf = vsi->back;
7030 static const u16 flag_table[128] = {
7031 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7032 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7033 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7034 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7035 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7036 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7037 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7038 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7039 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7040 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7041 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7042 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7043 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7044 I40E_AQC_ADD_CLOUD_FILTER_IIP,
7047 if (filter->flags >= ARRAY_SIZE(flag_table))
7048 return I40E_ERR_CONFIG;
7050 /* copy element needed to add cloud filter from filter */
7051 i40e_set_cld_element(filter, &cld_filter);
7053 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7054 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7055 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7057 if (filter->n_proto == ETH_P_IPV6)
7058 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7059 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7061 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7062 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7065 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7068 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7071 dev_dbg(&pf->pdev->dev,
7072 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7073 add ? "add" : "delete", filter->dst_port, ret,
7074 pf->hw.aq.asq_last_status);
7076 dev_info(&pf->pdev->dev,
7077 "%s cloud filter for VSI: %d\n",
7078 add ? "Added" : "Deleted", filter->seid);
7083 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7084 * @vsi: pointer to VSI
7085 * @filter: cloud filter rule
7086 * @add: if true, add, if false, delete
7088 * Add or delete a cloud filter for a specific flow spec using big buffer.
7089 * Returns 0 if the filter was successfully added.
7091 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7092 struct i40e_cloud_filter *filter,
7095 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7096 struct i40e_pf *pf = vsi->back;
7099 /* A filter with both valid src and dst MAC addresses is not supported */
7100 if ((is_valid_ether_addr(filter->dst_mac) &&
7101 is_valid_ether_addr(filter->src_mac)) ||
7102 (is_multicast_ether_addr(filter->dst_mac) &&
7103 is_multicast_ether_addr(filter->src_mac)))
7106 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7107 * ports are not supported via big buffer now.
7109 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7112 /* adding filter using src_port/src_ip is not supported at this stage */
7113 if (filter->src_port || filter->src_ipv4 ||
7114 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7117 /* copy element needed to add cloud filter from filter */
7118 i40e_set_cld_element(filter, &cld_filter.element);
7120 if (is_valid_ether_addr(filter->dst_mac) ||
7121 is_valid_ether_addr(filter->src_mac) ||
7122 is_multicast_ether_addr(filter->dst_mac) ||
7123 is_multicast_ether_addr(filter->src_mac)) {
7124 /* MAC + IP : unsupported mode */
7125 if (filter->dst_ipv4)
7128 /* since we validated that L4 port must be valid before
7129 * we get here, start with respective "flags" value
7130 * and update if vlan is present or not
7132 cld_filter.element.flags =
7133 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7135 if (filter->vlan_id) {
7136 cld_filter.element.flags =
7137 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7140 } else if (filter->dst_ipv4 ||
7141 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7142 cld_filter.element.flags =
7143 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7144 if (filter->n_proto == ETH_P_IPV6)
7145 cld_filter.element.flags |=
7146 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7148 cld_filter.element.flags |=
7149 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7151 dev_err(&pf->pdev->dev,
7152 "either mac or ip has to be valid for cloud filter\n");
7156 /* Now copy L4 port in Byte 6..7 in general fields */
7157 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7158 be16_to_cpu(filter->dst_port);
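/* Layout note (illustrative): word 0x16 of the big-buffer general
 * fields carries the destination L4 port in CPU order, e.g. HTTP
 * port 80 is stored as 0x0050 after the be16_to_cpu() above.
 */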
7161 /* Validate current device switch mode, change if necessary */
7162 ret = i40e_validate_and_set_switch_mode(vsi);
7164 dev_err(&pf->pdev->dev,
7165 "failed to set switch mode, ret %d\n",
7170 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7173 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7178 dev_dbg(&pf->pdev->dev,
7179 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7180 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7182 dev_info(&pf->pdev->dev,
7183 "%s cloud filter for VSI: %d, L4 port: %d\n",
7184 add ? "add" : "delete", filter->seid,
7185 ntohs(filter->dst_port));
7190 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7191 * @vsi: Pointer to VSI
7192 * @cls_flower: Pointer to struct tc_cls_flower_offload
7193 * @filter: Pointer to cloud filter structure
7196 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7197 struct tc_cls_flower_offload *f,
7198 struct i40e_cloud_filter *filter)
7200 struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
7201 struct flow_dissector *dissector = rule->match.dissector;
7202 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7203 struct i40e_pf *pf = vsi->back;
7206 if (dissector->used_keys &
7207 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7208 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7209 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7210 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7211 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7212 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7213 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7214 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7215 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7216 dissector->used_keys);
7220 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7221 struct flow_match_enc_keyid match;
7223 flow_rule_match_enc_keyid(rule, &match);
7224 if (match.mask->keyid != 0)
7225 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7227 filter->tenant_id = be32_to_cpu(match.key->keyid);
7230 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
7231 struct flow_match_basic match;
7233 flow_rule_match_basic(rule, &match);
7234 n_proto_key = ntohs(match.key->n_proto);
7235 n_proto_mask = ntohs(match.mask->n_proto);
7237 if (n_proto_key == ETH_P_ALL) {
7241 filter->n_proto = n_proto_key & n_proto_mask;
7242 filter->ip_proto = match.key->ip_proto;
7245 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7246 struct flow_match_eth_addrs match;
7248 flow_rule_match_eth_addrs(rule, &match);
7250 /* use is_broadcast and is_zero to check for an all-0xff or all-zero mask */
7251 if (!is_zero_ether_addr(match.mask->dst)) {
7252 if (is_broadcast_ether_addr(match.mask->dst)) {
7253 field_flags |= I40E_CLOUD_FIELD_OMAC;
7255 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7257 return I40E_ERR_CONFIG;
7261 if (!is_zero_ether_addr(match.mask->src)) {
7262 if (is_broadcast_ether_addr(match.mask->src)) {
7263 field_flags |= I40E_CLOUD_FIELD_IMAC;
7265 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7267 return I40E_ERR_CONFIG;
7270 ether_addr_copy(filter->dst_mac, match.key->dst);
7271 ether_addr_copy(filter->src_mac, match.key->src);
7274 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
7275 struct flow_match_vlan match;
7277 flow_rule_match_vlan(rule, &match);
7278 if (match.mask->vlan_id) {
7279 if (match.mask->vlan_id == VLAN_VID_MASK) {
7280 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7283 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7284 match.mask->vlan_id);
7285 return I40E_ERR_CONFIG;
7289 filter->vlan_id = cpu_to_be16(match.key->vlan_id);
7292 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
7293 struct flow_match_control match;
7295 flow_rule_match_control(rule, &match);
7296 addr_type = match.key->addr_type;
7299 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7300 struct flow_match_ipv4_addrs match;
7302 flow_rule_match_ipv4_addrs(rule, &match);
7303 if (match.mask->dst) {
7304 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
7305 field_flags |= I40E_CLOUD_FIELD_IIP;
7307 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7309 return I40E_ERR_CONFIG;
7313 if (match.mask->src) {
7314 if (match.mask->src == cpu_to_be32(0xffffffff)) {
7315 field_flags |= I40E_CLOUD_FIELD_IIP;
7317 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7319 return I40E_ERR_CONFIG;
7323 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7324 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7325 return I40E_ERR_CONFIG;
7327 filter->dst_ipv4 = match.key->dst;
7328 filter->src_ipv4 = match.key->src;
7331 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7332 struct flow_match_ipv6_addrs match;
7334 flow_rule_match_ipv6_addrs(rule, &match);
7336 /* src and dest IPV6 address should not be LOOPBACK
7337 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7339 if (ipv6_addr_loopback(&match.key->dst) ||
7340 ipv6_addr_loopback(&match.key->src)) {
7341 dev_err(&pf->pdev->dev,
7342 "Bad ipv6, addr is LOOPBACK\n");
7343 return I40E_ERR_CONFIG;
7345 if (!ipv6_addr_any(&match.mask->dst) ||
7346 !ipv6_addr_any(&match.mask->src))
7347 field_flags |= I40E_CLOUD_FIELD_IIP;
7349 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
7350 sizeof(filter->src_ipv6));
7351 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
7352 sizeof(filter->dst_ipv6));
7355 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
7356 struct flow_match_ports match;
7358 flow_rule_match_ports(rule, &match);
7359 if (match.mask->src) {
7360 if (match.mask->src == cpu_to_be16(0xffff)) {
7361 field_flags |= I40E_CLOUD_FIELD_IIP;
7363 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7364 be16_to_cpu(match.mask->src));
7365 return I40E_ERR_CONFIG;
7369 if (match.mask->dst) {
7370 if (match.mask->dst == cpu_to_be16(0xffff)) {
7371 field_flags |= I40E_CLOUD_FIELD_IIP;
7373 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7374 be16_to_cpu(match.mask->dst));
7375 return I40E_ERR_CONFIG;
7379 filter->dst_port = match.key->dst;
7380 filter->src_port = match.key->src;
7382 switch (filter->ip_proto) {
7387 dev_err(&pf->pdev->dev,
7388 "Only UDP and TCP transport are supported\n");
7392 filter->flags = field_flags;
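/* Illustrative userspace trigger (assumed names and addresses):
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *      dst_mac 3c:fd:fe:a0:d6:70 dst_ip 192.168.1.1 ip_proto tcp \
 *      dst_port 80 skip_sw hw_tc 1
 * exercises the eth_addrs, ipv4_addrs and ports matches above.
 */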
7397 * i40e_handle_tclass - Forward to a traffic class on the device
7398 * @vsi: Pointer to VSI
7399 * @tc: traffic class index on the device
7400 * @filter: Pointer to cloud filter structure
7403 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7404 struct i40e_cloud_filter *filter)
7406 struct i40e_channel *ch, *ch_tmp;
7408 /* direct to a traffic class on the same device */
7410 filter->seid = vsi->seid;
7412 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7413 if (!filter->dst_port) {
7414 dev_err(&vsi->back->pdev->dev,
7415 "Specify destination port to direct to traffic class that is not default\n");
7418 if (list_empty(&vsi->ch_list))
7420 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7422 if (ch->seid == vsi->tc_seid_map[tc])
7423 filter->seid = ch->seid;
7427 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7432 * i40e_configure_clsflower - Configure tc flower filters
7433 * @vsi: Pointer to VSI
7434 * @cls_flower: Pointer to struct tc_cls_flower_offload
7437 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7438 struct tc_cls_flower_offload *cls_flower)
7440 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7441 struct i40e_cloud_filter *filter = NULL;
7442 struct i40e_pf *pf = vsi->back;
7446 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7450 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7451 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7454 if (pf->fdir_pf_active_filters ||
7455 (!hlist_empty(&pf->fdir_filter_list))) {
7456 dev_err(&vsi->back->pdev->dev,
7457 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7461 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7462 dev_err(&vsi->back->pdev->dev,
7463 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7464 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7465 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7468 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7472 filter->cookie = cls_flower->cookie;
7474 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7478 err = i40e_handle_tclass(vsi, tc, filter);
7482 /* Add cloud filter */
7483 if (filter->dst_port)
7484 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7486 err = i40e_add_del_cloud_filter(vsi, filter, true);
7489 dev_err(&pf->pdev->dev,
7490 "Failed to add cloud filter, err %s\n",
7491 i40e_stat_str(&pf->hw, err));
7495 /* add filter to the ordered list */
7496 INIT_HLIST_NODE(&filter->cloud_node);
7498 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7500 pf->num_cloud_filters++;
7509 * i40e_find_cloud_filter - Find the cloud filter in the list
7510 * @vsi: Pointer to VSI
7511 * @cookie: filter specific cookie
7514 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7515 unsigned long *cookie)
7517 struct i40e_cloud_filter *filter = NULL;
7518 struct hlist_node *node2;
7520 hlist_for_each_entry_safe(filter, node2,
7521 &vsi->back->cloud_filter_list, cloud_node)
7522 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7528 * i40e_delete_clsflower - Remove tc flower filters
7529 * @vsi: Pointer to VSI
7530 * @cls_flower: Pointer to struct tc_cls_flower_offload
7533 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7534 struct tc_cls_flower_offload *cls_flower)
7536 struct i40e_cloud_filter *filter = NULL;
7537 struct i40e_pf *pf = vsi->back;
7540 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7545 hash_del(&filter->cloud_node);
7547 if (filter->dst_port)
7548 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7550 err = i40e_add_del_cloud_filter(vsi, filter, false);
7554 dev_err(&pf->pdev->dev,
7555 "Failed to delete cloud filter, err %s\n",
7556 i40e_stat_str(&pf->hw, err));
7557 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7560 pf->num_cloud_filters--;
7561 if (!pf->num_cloud_filters)
7562 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7563 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7564 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7565 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7566 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7572 * i40e_setup_tc_cls_flower - flower classifier offloads
7573 * @netdev: net device to configure
7574 * @type_data: offload data
7576 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7577 struct tc_cls_flower_offload *cls_flower)
7579 struct i40e_vsi *vsi = np->vsi;
7581 switch (cls_flower->command) {
7582 case TC_CLSFLOWER_REPLACE:
7583 return i40e_configure_clsflower(vsi, cls_flower);
7584 case TC_CLSFLOWER_DESTROY:
7585 return i40e_delete_clsflower(vsi, cls_flower);
7586 case TC_CLSFLOWER_STATS:
7593 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7596 struct i40e_netdev_priv *np = cb_priv;
7598 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
7602 case TC_SETUP_CLSFLOWER:
7603 return i40e_setup_tc_cls_flower(np, type_data);
7610 static int i40e_setup_tc_block(struct net_device *dev,
7611 struct tc_block_offload *f)
7613 struct i40e_netdev_priv *np = netdev_priv(dev);
7615 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7618 switch (f->command) {
7620 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7622 case TC_BLOCK_UNBIND:
7623 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7630 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7634 case TC_SETUP_QDISC_MQPRIO:
7635 return i40e_setup_tc(netdev, type_data);
7636 case TC_SETUP_BLOCK:
7637 return i40e_setup_tc_block(netdev, type_data);
7644 * i40e_open - Called when a network interface is made active
7645 * @netdev: network interface device structure
7647 * The open entry point is called when a network interface is made
7648 * active by the system (IFF_UP). At this point all resources needed
7649 * for transmit and receive operations are allocated, the interrupt
7650 * handler is registered with the OS, the netdev watchdog subtask is
7651 * enabled, and the stack is notified that the interface is ready.
7653 * Returns 0 on success, negative value on failure
7655 int i40e_open(struct net_device *netdev)
7657 struct i40e_netdev_priv *np = netdev_priv(netdev);
7658 struct i40e_vsi *vsi = np->vsi;
7659 struct i40e_pf *pf = vsi->back;
7662 /* disallow open during test or if eeprom is broken */
7663 if (test_bit(__I40E_TESTING, pf->state) ||
7664 test_bit(__I40E_BAD_EEPROM, pf->state))
7667 netif_carrier_off(netdev);
7669 if (i40e_force_link_state(pf, true))
7672 err = i40e_vsi_open(vsi);
7676 /* configure global TSO hardware offload settings */
7677 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7678 TCP_FLAG_FIN) >> 16);
7679 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7681 TCP_FLAG_CWR) >> 16);
7682 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7684 udp_tunnel_get_rx_info(netdev);
7691 * @vsi: the VSI to open
7693 * Finish initialization of the VSI.
7695 * Returns 0 on success, negative value on failure
7697 * Note: expects to be called while under rtnl_lock()
7699 int i40e_vsi_open(struct i40e_vsi *vsi)
7701 struct i40e_pf *pf = vsi->back;
7702 char int_name[I40E_INT_NAME_STR_LEN];
7705 /* allocate descriptors */
7706 err = i40e_vsi_setup_tx_resources(vsi);
7709 err = i40e_vsi_setup_rx_resources(vsi);
7713 err = i40e_vsi_configure(vsi);
7718 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7719 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7720 err = i40e_vsi_request_irq(vsi, int_name);
7724 /* Notify the stack of the actual queue counts. */
7725 err = netif_set_real_num_tx_queues(vsi->netdev,
7726 vsi->num_queue_pairs);
7728 goto err_set_queues;
7730 err = netif_set_real_num_rx_queues(vsi->netdev,
7731 vsi->num_queue_pairs);
7733 goto err_set_queues;
7735 } else if (vsi->type == I40E_VSI_FDIR) {
7736 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7737 dev_driver_string(&pf->pdev->dev),
7738 dev_name(&pf->pdev->dev));
7739 err = i40e_vsi_request_irq(vsi, int_name);
7746 err = i40e_up_complete(vsi);
7748 goto err_up_complete;
7755 i40e_vsi_free_irq(vsi);
7757 i40e_vsi_free_rx_resources(vsi);
7759 i40e_vsi_free_tx_resources(vsi);
7760 if (vsi == pf->vsi[pf->lan_vsi])
7761 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7767 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
7768 * @pf: Pointer to PF
7770 * This function destroys the hlist where all the Flow Director
7771 * filters were saved.
7773 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7775 struct i40e_fdir_filter *filter;
7776 struct i40e_flex_pit *pit_entry, *tmp;
7777 struct hlist_node *node2;
7779 hlist_for_each_entry_safe(filter, node2,
7780 &pf->fdir_filter_list, fdir_node) {
7781 hlist_del(&filter->fdir_node);
7785 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7786 list_del(&pit_entry->list);
7789 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7791 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7792 list_del(&pit_entry->list);
7795 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
7797 pf->fdir_pf_active_filters = 0;
7798 pf->fd_tcp4_filter_cnt = 0;
7799 pf->fd_udp4_filter_cnt = 0;
7800 pf->fd_sctp4_filter_cnt = 0;
7801 pf->fd_ip4_filter_cnt = 0;
7803 /* Reprogram the default input set for TCP/IPv4 */
7804 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7805 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7806 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7808 /* Reprogram the default input set for UDP/IPv4 */
7809 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7810 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7811 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7813 /* Reprogram the default input set for SCTP/IPv4 */
7814 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7815 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7816 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7818 /* Reprogram the default input set for Other/IPv4 */
7819 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7820 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7822 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
7823 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
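/* Input set note (illustrative): each mask combination above restores
 * the default match: src/dst IP plus src/dst L4 port for the TCP, UDP
 * and SCTP flow types, and src/dst IP only for the Other and
 * fragmented IPv4 flow types.
 */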
7827 * i40e_cloud_filter_exit - Cleans up the cloud filters
7828 * @pf: Pointer to PF
7830 * This function destroys the hlist where all the cloud filters are saved.
7833 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7835 struct i40e_cloud_filter *cfilter;
7836 struct hlist_node *node;
7838 hlist_for_each_entry_safe(cfilter, node,
7839 &pf->cloud_filter_list, cloud_node) {
7840 hlist_del(&cfilter->cloud_node);
7843 pf->num_cloud_filters = 0;
7845 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7846 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7847 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7848 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7849 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7854 * i40e_close - Disables a network interface
7855 * @netdev: network interface device structure
7857 * The close entry point is called when an interface is de-activated
7858 * by the OS. The hardware is still under the driver's control, but
7859 * this netdev interface is disabled.
7861 * Returns 0, this is not allowed to fail
7863 int i40e_close(struct net_device *netdev)
7865 struct i40e_netdev_priv *np = netdev_priv(netdev);
7866 struct i40e_vsi *vsi = np->vsi;
7868 i40e_vsi_close(vsi);
7874 * i40e_do_reset - Start a PF or Core Reset sequence
7875 * @pf: board private structure
7876 * @reset_flags: which reset is requested
7877 * @lock_acquired: indicates whether or not the lock has been acquired
7878 * before this function was called.
7880 * The essential difference in resets is that the PF Reset
7881 * doesn't clear the packet buffers, doesn't reset the PE
7882 * firmware, and doesn't bother the other PFs on the chip.
7884 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7888 WARN_ON(in_interrupt());
7891 /* do the biggest reset indicated */
7892 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7894 /* Request a Global Reset
7896 * This will start the chip's countdown to the actual full
7897 * chip reset event, and a warning interrupt to be sent
7898 * to all PFs, including the requestor. Our handler
7899 * for the warning interrupt will deal with the shutdown
7900 * and recovery of the switch setup.
7902 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7903 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7904 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7905 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7907 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7909 /* Request a Core Reset
7911 * Same as Global Reset, except does *not* include the MAC/PHY
7913 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7914 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7915 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7916 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7917 i40e_flush(&pf->hw);
7919 } else if (reset_flags & I40E_PF_RESET_FLAG) {
7921 /* Request a PF Reset
7923 * Resets only the PF-specific registers
7925 * This goes directly to the tear-down and rebuild of
7926 * the switch, since we need to do all the recovery as
7927 * for the Core Reset.
7929 dev_dbg(&pf->pdev->dev, "PFR requested\n");
7930 i40e_handle_reset_warning(pf, lock_acquired);
7932 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7935 /* Find the VSI(s) that requested a re-init */
7936 dev_info(&pf->pdev->dev,
7937 "VSI reinit requested\n");
7938 for (v = 0; v < pf->num_alloc_vsi; v++) {
7939 struct i40e_vsi *vsi = pf->vsi[v];
7942 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
7944 i40e_vsi_reinit_locked(pf->vsi[v]);
7946 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7949 /* Find the VSI(s) that need to be brought down */
7950 dev_info(&pf->pdev->dev, "VSI down requested\n");
7951 for (v = 0; v < pf->num_alloc_vsi; v++) {
7952 struct i40e_vsi *vsi = pf->vsi[v];
7955 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7957 set_bit(__I40E_VSI_DOWN, vsi->state);
7962 dev_info(&pf->pdev->dev,
7963 "bad reset request 0x%08x\n", reset_flags);
7967 #ifdef CONFIG_I40E_DCB
7969 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
7970 * @pf: board private structure
7971 * @old_cfg: current DCB config
7972 * @new_cfg: new DCB config
7974 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
7975 struct i40e_dcbx_config *old_cfg,
7976 struct i40e_dcbx_config *new_cfg)
7978 bool need_reconfig = false;
7980 /* Check if ETS configuration has changed */
7981 if (memcmp(&new_cfg->etscfg,
7983 sizeof(new_cfg->etscfg))) {
7984 /* If Priority Table has changed reconfig is needed */
7985 if (memcmp(&new_cfg->etscfg.prioritytable,
7986 &old_cfg->etscfg.prioritytable,
7987 sizeof(new_cfg->etscfg.prioritytable))) {
7988 need_reconfig = true;
7989 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
7992 if (memcmp(&new_cfg->etscfg.tcbwtable,
7993 &old_cfg->etscfg.tcbwtable,
7994 sizeof(new_cfg->etscfg.tcbwtable)))
7995 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
7997 if (memcmp(&new_cfg->etscfg.tsatable,
7998 &old_cfg->etscfg.tsatable,
7999 sizeof(new_cfg->etscfg.tsatable)))
8000 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
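/* Note: of the ETS diffs above only a priority table change sets
 * need_reconfig; TC BW and TSA table diffs are logged for debugging
 * but do not by themselves force a reconfiguration.
 */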
8003 /* Check if PFC configuration has changed */
8004 if (memcmp(&new_cfg->pfc,
8006 sizeof(new_cfg->pfc))) {
8007 need_reconfig = true;
8008 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8011 /* Check if APP Table has changed */
8012 if (memcmp(&new_cfg->app,
8014 sizeof(new_cfg->app))) {
8015 need_reconfig = true;
8016 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8019 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8020 return need_reconfig;
8024 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8025 * @pf: board private structure
8026 * @e: event info posted on ARQ
8028 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8029 struct i40e_arq_event_info *e)
8031 struct i40e_aqc_lldp_get_mib *mib =
8032 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8033 struct i40e_hw *hw = &pf->hw;
8034 struct i40e_dcbx_config tmp_dcbx_cfg;
8035 bool need_reconfig = false;
8039 /* Not DCB capable or capability disabled */
8040 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8043 /* Ignore if event is not for Nearest Bridge */
8044 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8045 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8046 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8047 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8050 /* Check MIB Type and return if event for Remote MIB update */
8051 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8052 dev_dbg(&pf->pdev->dev,
8053 "LLDP event mib type %s\n", type ? "remote" : "local");
8054 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8055 /* Update the remote cached instance and return */
8056 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8057 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8058 &hw->remote_dcbx_config);
8062 /* Store the old configuration */
8063 tmp_dcbx_cfg = hw->local_dcbx_config;
8065 /* Reset the old DCBx configuration data */
8066 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8067 /* Get updated DCBX data from firmware */
8068 ret = i40e_get_dcb_config(&pf->hw);
8070 dev_info(&pf->pdev->dev,
8071 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8072 i40e_stat_str(&pf->hw, ret),
8073 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8077 /* No change detected in DCBX configs */
8078 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8079 sizeof(tmp_dcbx_cfg))) {
8080 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8084 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8085 &hw->local_dcbx_config);
8087 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8092 /* Enable DCB tagging only when more than one TC */
8093 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8094 pf->flags |= I40E_FLAG_DCB_ENABLED;
8096 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8098 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8099 /* Reconfiguration needed; quiesce all VSIs */
8100 i40e_pf_quiesce_all_vsi(pf);
8102 /* Changes in configuration update VEB/VSI */
8103 i40e_dcb_reconfigure(pf);
8105 ret = i40e_resume_port_tx(pf);
8107 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8108 /* In case of error no point in resuming VSIs */
8112 /* Wait for the PF's queues to be disabled */
8113 ret = i40e_pf_wait_queues_disabled(pf);
8115 /* Schedule PF reset to recover */
8116 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8117 i40e_service_event_schedule(pf);
8119 i40e_pf_unquiesce_all_vsi(pf);
8120 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8121 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8127 #endif /* CONFIG_I40E_DCB */
8130 * i40e_do_reset_safe - Protected reset path for userland calls.
8131 * @pf: board private structure
8132 * @reset_flags: which reset is requested
8135 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8138 i40e_do_reset(pf, reset_flags, true);
8143 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8144 * @pf: board private structure
8145 * @e: event info posted on ARQ
8147 * Handler for LAN Queue Overflow Event generated by the firmware for the PF interface.
8150 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8151 struct i40e_arq_event_info *e)
8153 struct i40e_aqc_lan_overflow *data =
8154 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8155 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8156 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8157 struct i40e_hw *hw = &pf->hw;
8161 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8164 /* Queue belongs to VF, find the VF and issue VF reset */
8165 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8166 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8167 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8168 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8169 vf_id -= hw->func_caps.vf_base_id;
8170 vf = &pf->vf[vf_id];
8171 i40e_vc_notify_vf_reset(vf);
8172 /* Allow VF to process pending reset notification */
8174 i40e_reset_vf(vf, false);
8179 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8180 * @pf: board private structure
8182 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8186 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8187 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8192 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8193 * @pf: board private structure
8195 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8199 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8200 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8201 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8202 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
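/* Illustrative decode (assuming the usual FDSTAT field layout):
 * FDSTAT = 0x00200010 means 0x10 guaranteed plus 0x20 best-effort
 * filters, so this helper would return 48.
 */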
8207 * i40e_get_global_fd_count - Get total FD filters programmed on device
8208 * @pf: board private structure
8210 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8214 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8215 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8216 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8217 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8222 * i40e_reenable_fdir_sb - Restore FDir SB capability
8223 * @pf: board private structure
8225 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8227 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8228 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8229 (I40E_DEBUG_FD & pf->hw.debug_mask))
8230 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8234 * i40e_reenable_fdir_atr - Restore FDir ATR capability
8235 * @pf: board private structure
8237 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8239 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8240 /* ATR uses the same filtering logic as SB rules. It only
8241 * functions properly if the input set mask is at the default
8242 * settings. It is safe to restore the default input set
8243 * because there are no active TCPv4 filter rules.
8245 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8246 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8247 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8249 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8250 (I40E_DEBUG_FD & pf->hw.debug_mask))
8251 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8256 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8257 * @pf: board private structure
8258 * @filter: FDir filter to remove
8260 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8261 struct i40e_fdir_filter *filter)
8263 /* Update counters */
8264 pf->fdir_pf_active_filters--;
8267 switch (filter->flow_type) {
8269 pf->fd_tcp4_filter_cnt--;
8272 pf->fd_udp4_filter_cnt--;
8275 pf->fd_sctp4_filter_cnt--;
8278 switch (filter->ip4_proto) {
8280 pf->fd_tcp4_filter_cnt--;
8283 pf->fd_udp4_filter_cnt--;
8286 pf->fd_sctp4_filter_cnt--;
8289 pf->fd_ip4_filter_cnt--;
8295 /* Remove the filter from the list and free memory */
8296 hlist_del(&filter->fdir_node);
8301 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8302 * @pf: board private structure
8304 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8306 struct i40e_fdir_filter *filter;
8307 u32 fcnt_prog, fcnt_avail;
8308 struct hlist_node *node;
8310 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8313 /* Check if we have enough room to re-enable FDir SB capability. */
8314 fcnt_prog = i40e_get_global_fd_count(pf);
8315 fcnt_avail = pf->fdir_pf_filter_count;
8316 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8317 (pf->fd_add_err == 0) ||
8318 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8319 i40e_reenable_fdir_sb(pf);
8321 /* We should wait for even more space before re-enabling ATR.
8322 * Additionally, we cannot enable ATR as long as we still have TCP SB rules. */
8325 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8326 (pf->fd_tcp4_filter_cnt == 0))
8327 i40e_reenable_fdir_atr(pf);
8329 /* if hw had a problem adding a filter, delete it */
8330 if (pf->fd_inv > 0) {
8331 hlist_for_each_entry_safe(filter, node,
8332 &pf->fdir_filter_list, fdir_node)
8333 if (filter->fd_id == pf->fd_inv)
8334 i40e_delete_invalid_filter(pf, filter);
8338 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8339 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8341 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8342 * @pf: board private structure
8344 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8346 unsigned long min_flush_time;
8347 int flush_wait_retry = 50;
8348 bool disable_atr = false;
8352 if (!time_after(jiffies, pf->fd_flush_timestamp +
8353 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8356 /* If the flush is happening too quickly and we have mostly SB rules we
8357 * should not re-enable ATR for some time.
8359 min_flush_time = pf->fd_flush_timestamp +
8360 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8361 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8363 if (!(time_after(jiffies, min_flush_time)) &&
8364 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8365 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8366 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8370 pf->fd_flush_timestamp = jiffies;
8371 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8372 /* flush all filters */
8373 wr32(&pf->hw, I40E_PFQF_CTL_1,
8374 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8375 i40e_flush(&pf->hw);
8379 /* Check FD flush status every 5-6 msec */
8380 usleep_range(5000, 6000);
8381 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8382 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8384 } while (flush_wait_retry--);
8385 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8386 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8388 /* replay sideband filters */
8389 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8390 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8391 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8392 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8393 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8394 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8399 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8400 * @pf: board private structure
8402 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8404 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8407 /* We can see up to 256 filter programming descriptors in transit if the
8408 * filters are being applied really fast, before we see the first
8409 * filter miss error on Rx queue 0. Accumulating enough error messages
8410 * before reacting will make sure we don't trigger a flush too often.
8412 #define I40E_MAX_FD_PROGRAM_ERROR 256
8415 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8416 * @pf: board private structure
8418 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8421 /* if interface is down do nothing */
8422 if (test_bit(__I40E_DOWN, pf->state))
8425 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8426 i40e_fdir_flush_and_replay(pf);
8428 i40e_fdir_check_and_reenable(pf);
8433 * i40e_vsi_link_event - notify VSI of a link event
8434 * @vsi: vsi to be notified
8435 * @link_up: link up or down
8437 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8439 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8442 switch (vsi->type) {
8444 if (!vsi->netdev || !vsi->netdev_registered)
8448 netif_carrier_on(vsi->netdev);
8449 netif_tx_wake_all_queues(vsi->netdev);
8451 netif_carrier_off(vsi->netdev);
8452 netif_tx_stop_all_queues(vsi->netdev);
8456 case I40E_VSI_SRIOV:
8457 case I40E_VSI_VMDQ2:
8459 case I40E_VSI_IWARP:
8460 case I40E_VSI_MIRROR:
8462 /* there is no notification for other VSIs */
8468 * i40e_veb_link_event - notify elements on the veb of a link event
8469 * @veb: veb to be notified
8470 * @link_up: link up or down
8472 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8477 if (!veb || !veb->pf)
8481 /* depth first... */
8482 for (i = 0; i < I40E_MAX_VEB; i++)
8483 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8484 i40e_veb_link_event(pf->veb[i], link_up);
8486 /* ... now the local VSIs */
8487 for (i = 0; i < pf->num_alloc_vsi; i++)
8488 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8489 i40e_vsi_link_event(pf->vsi[i], link_up);
8493 * i40e_link_event - Update netif_carrier status
8494 * @pf: board private structure
8496 static void i40e_link_event(struct i40e_pf *pf)
8498 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8499 u8 new_link_speed, old_link_speed;
8501 bool new_link, old_link;
8503 /* set this to force the get_link_status call to refresh state */
8504 pf->hw.phy.get_link_info = true;
8505 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8506 status = i40e_get_link_status(&pf->hw, &new_link);
8508 /* On success, disable temp link polling */
8509 if (status == I40E_SUCCESS) {
8510 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8512 /* Enable link polling temporarily until i40e_get_link_status
8513 * returns I40E_SUCCESS
8515 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8516 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8521 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8522 new_link_speed = pf->hw.phy.link_info.link_speed;
8524 if (new_link == old_link &&
8525 new_link_speed == old_link_speed &&
8526 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8527 new_link == netif_carrier_ok(vsi->netdev)))
8530 i40e_print_link_message(vsi, new_link);
8532 /* Notify the base of the switch tree connected to
8533 * the link. Floating VEBs are not notified.
8535 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8536 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8538 i40e_vsi_link_event(vsi, new_link);
8541 i40e_vc_notify_link_state(pf);
8543 if (pf->flags & I40E_FLAG_PTP)
8544 i40e_ptp_set_increment(pf);
8548 * i40e_watchdog_subtask - periodic checks not using event driven response
8549 * @pf: board private structure
8551 static void i40e_watchdog_subtask(struct i40e_pf *pf)
8555 /* if interface is down do nothing */
8556 if (test_bit(__I40E_DOWN, pf->state) ||
8557 test_bit(__I40E_CONFIG_BUSY, pf->state))
8560 /* make sure we don't do these things too often */
8561 if (time_before(jiffies, (pf->service_timer_previous +
8562 pf->service_timer_period)))
8564 pf->service_timer_previous = jiffies;
8566 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8567 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
8568 i40e_link_event(pf);
8570 /* Update the stats for active netdevs so the network stack
8571 * can look at updated numbers whenever it cares to
8573 for (i = 0; i < pf->num_alloc_vsi; i++)
8574 if (pf->vsi[i] && pf->vsi[i]->netdev)
8575 i40e_update_stats(pf->vsi[i]);
8577 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8578 /* Update the stats for the active switching components */
8579 for (i = 0; i < I40E_MAX_VEB; i++)
8581 i40e_update_veb_stats(pf->veb[i]);
8584 i40e_ptp_rx_hang(pf);
8585 i40e_ptp_tx_hang(pf);
8589 * i40e_reset_subtask - Set up for resetting the device and driver
8590 * @pf: board private structure
8592 static void i40e_reset_subtask(struct i40e_pf *pf)
8594 u32 reset_flags = 0;
8596 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8597 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8598 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8600 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8601 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8602 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8604 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8605 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8606 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8608 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8609 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8610 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8612 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8613 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8614 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8617 /* If there's a recovery already waiting, it takes
8618 * precedence over starting a new reset sequence.
8620 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8621 i40e_prep_for_reset(pf, false);
8623 i40e_rebuild(pf, false, false);
8626 /* If we're already down or resetting, just bail */
8627 if (reset_flags &&
8628 !test_bit(__I40E_DOWN, pf->state) &&
8629 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8630 i40e_do_reset(pf, reset_flags, false);
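/* Sketch of the latch-and-consume pattern used above (demo_* is
 * hypothetical): each requested reset is turned into a bit of a local
 * mask and simultaneously cleared from pf->state, so one service pass
 * consumes each request exactly once.
 */
static u32 demo_consume_reset_request(struct i40e_pf *pf, int flag)
{
	u32 mask = 0;

	if (test_bit(flag, pf->state)) {
		mask = BIT(flag);
		clear_bit(flag, pf->state);
	}
	return mask;
}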
8635 * i40e_handle_link_event - Handle link event
8636 * @pf: board private structure
8637 * @e: event info posted on ARQ
8639 static void i40e_handle_link_event(struct i40e_pf *pf,
8640 struct i40e_arq_event_info *e)
8642 struct i40e_aqc_get_link_status *status =
8643 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8645 /* Do a new status request to re-enable LSE reporting
8646 * and load new status information into the hw struct.
8647 * This completely ignores any state information
8648 * in the ARQ event info, instead choosing to always
8649 * issue the AQ update link status command.
8651 i40e_link_event(pf);
8653 /* Check if module meets thermal requirements */
8654 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8655 dev_err(&pf->pdev->dev,
8656 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8657 dev_err(&pf->pdev->dev,
8658 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8660 /* check for an unqualified module and, if the link is down,
8661 * suppress the message when the link was forced down.
8663 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8664 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8665 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8666 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8667 dev_err(&pf->pdev->dev,
8668 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8669 dev_err(&pf->pdev->dev,
8670 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8676 * i40e_clean_adminq_subtask - Clean the AdminQ rings
8677 * @pf: board private structure
8679 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8681 struct i40e_arq_event_info event;
8682 struct i40e_hw *hw = &pf->hw;
8689 /* Do not run clean AQ when PF reset fails */
8690 if (test_bit(__I40E_RESET_FAILED, pf->state))
8693 /* check for error indications */
8694 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8696 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8697 if (hw->debug_mask & I40E_DEBUG_AQ)
8698 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8699 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8701 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8702 if (hw->debug_mask & I40E_DEBUG_AQ)
8703 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8704 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8705 pf->arq_overflows++;
8707 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8708 if (hw->debug_mask & I40E_DEBUG_AQ)
8709 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8710 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8713 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8715 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8717 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8718 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8719 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8720 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8722 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8723 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8724 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8725 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8727 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8728 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8729 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8730 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8733 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8735 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8736 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8741 ret = i40e_clean_arq_element(hw, &event, &pending);
8742 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8745 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8749 opcode = le16_to_cpu(event.desc.opcode);
8752 case i40e_aqc_opc_get_link_status:
8753 i40e_handle_link_event(pf, &event);
8755 case i40e_aqc_opc_send_msg_to_pf:
8756 ret = i40e_vc_process_vf_msg(pf,
8757 le16_to_cpu(event.desc.retval),
8758 le32_to_cpu(event.desc.cookie_high),
8759 le32_to_cpu(event.desc.cookie_low),
8763 case i40e_aqc_opc_lldp_update_mib:
8764 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8765 #ifdef CONFIG_I40E_DCB
8767 ret = i40e_handle_lldp_event(pf, &event);
8769 #endif /* CONFIG_I40E_DCB */
8771 case i40e_aqc_opc_event_lan_overflow:
8772 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8773 i40e_handle_lan_overflow_event(pf, &event);
8775 case i40e_aqc_opc_send_msg_to_peer:
8776 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8778 case i40e_aqc_opc_nvm_erase:
8779 case i40e_aqc_opc_nvm_update:
8780 case i40e_aqc_opc_oem_post_update:
8781 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8782 "ARQ NVM operation 0x%04x completed\n",
8786 dev_info(&pf->pdev->dev,
8787 "ARQ: Unknown event 0x%04x ignored\n",
8791 } while (i++ < pf->adminq_work_limit);
8793 if (i < pf->adminq_work_limit)
8794 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8796 /* re-enable Admin queue interrupt cause */
8797 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8798 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8799 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8802 kfree(event.msg_buf);
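/* Sketch of the bounded drain loop shape used by this subtask (demo_*
 * names and the pop_one callback are hypothetical): process at most
 * 'limit' events per pass, and only clear the summary PENDING bit when
 * the queue was fully drained within that budget.
 */
static void demo_bounded_drain(struct i40e_pf *pf, u16 limit,
			       bool (*pop_one)(struct i40e_pf *pf))
{
	u16 i = 0;

	do {
		if (!pop_one(pf))	/* queue empty: fully drained */
			break;
	} while (i++ < limit);

	if (i < limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
}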
8806 * i40e_verify_eeprom - make sure eeprom is good to use
8807 * @pf: board private structure
8809 static void i40e_verify_eeprom(struct i40e_pf *pf)
8813 err = i40e_diag_eeprom_test(&pf->hw);
8815 /* retry in case of garbage read */
8816 err = i40e_diag_eeprom_test(&pf->hw);
8818 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8820 set_bit(__I40E_BAD_EEPROM, pf->state);
8824 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8825 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8826 clear_bit(__I40E_BAD_EEPROM, pf->state);
8831 * i40e_enable_pf_switch_lb
8832 * @pf: pointer to the PF structure
8834 * enable switch loopback or die - no point in a return value
8836 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8838 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8839 struct i40e_vsi_context ctxt;
8842 ctxt.seid = pf->main_vsi_seid;
8843 ctxt.pf_num = pf->hw.pf_id;
8845 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8847 dev_info(&pf->pdev->dev,
8848 "couldn't get PF vsi config, err %s aq_err %s\n",
8849 i40e_stat_str(&pf->hw, ret),
8850 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8853 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8854 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8855 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8857 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8859 dev_info(&pf->pdev->dev,
8860 "update vsi switch failed, err %s aq_err %s\n",
8861 i40e_stat_str(&pf->hw, ret),
8862 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8867 * i40e_disable_pf_switch_lb
8868 * @pf: pointer to the PF structure
8870 * disable switch loopback or die - no point in a return value
8872 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8874 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8875 struct i40e_vsi_context ctxt;
8878 ctxt.seid = pf->main_vsi_seid;
8879 ctxt.pf_num = pf->hw.pf_id;
8881 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8883 dev_info(&pf->pdev->dev,
8884 "couldn't get PF vsi config, err %s aq_err %s\n",
8885 i40e_stat_str(&pf->hw, ret),
8886 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8889 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8890 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8891 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8893 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8895 dev_info(&pf->pdev->dev,
8896 "update vsi switch failed, err %s aq_err %s\n",
8897 i40e_stat_str(&pf->hw, ret),
8898 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8903 * i40e_config_bridge_mode - Configure the HW bridge mode
8904 * @veb: pointer to the bridge instance
8906 * Configure the loopback mode for the LAN VSI that is downlink to the
8907 * specified HW bridge instance. This function is expected to be called
8908 * when a new HW bridge is instantiated.
8910 static void i40e_config_bridge_mode(struct i40e_veb *veb)
8912 struct i40e_pf *pf = veb->pf;
8914 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
8915 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
8916 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8917 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
8918 i40e_disable_pf_switch_lb(pf);
8920 i40e_enable_pf_switch_lb(pf);
8924 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
8925 * @veb: pointer to the VEB instance
8927 * This is a recursive function that first builds the attached VSIs then
8928 * recurses to build the next layer of VEBs. We track the connections
8929 * through our own index numbers because the SEIDs from the HW could
8930 * change across the reset.
8932 static int i40e_reconstitute_veb(struct i40e_veb *veb)
8934 struct i40e_vsi *ctl_vsi = NULL;
8935 struct i40e_pf *pf = veb->pf;
8939 /* build VSI that owns this VEB, temporarily attached to base VEB */
8940 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
8942 pf->vsi[v]->veb_idx == veb->idx &&
8943 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
8944 ctl_vsi = pf->vsi[v];
8949 dev_info(&pf->pdev->dev,
8950 "missing owner VSI for veb_idx %d\n", veb->idx);
8952 goto end_reconstitute;
8954 if (ctl_vsi != pf->vsi[pf->lan_vsi])
8955 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8956 ret = i40e_add_vsi(ctl_vsi);
8958 dev_info(&pf->pdev->dev,
8959 "rebuild of veb_idx %d owner VSI failed: %d\n",
8961 goto end_reconstitute;
8963 i40e_vsi_reset_stats(ctl_vsi);
8965 /* create the VEB in the switch and move the VSI onto the VEB */
8966 ret = i40e_add_veb(veb, ctl_vsi);
8968 goto end_reconstitute;
8970 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
8971 veb->bridge_mode = BRIDGE_MODE_VEB;
8973 veb->bridge_mode = BRIDGE_MODE_VEPA;
8974 i40e_config_bridge_mode(veb);
8976 /* create the remaining VSIs attached to this VEB */
8977 for (v = 0; v < pf->num_alloc_vsi; v++) {
8978 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
8981 if (pf->vsi[v]->veb_idx == veb->idx) {
8982 struct i40e_vsi *vsi = pf->vsi[v];
8984 vsi->uplink_seid = veb->seid;
8985 ret = i40e_add_vsi(vsi);
8987 dev_info(&pf->pdev->dev,
8988 "rebuild of vsi_idx %d failed: %d\n",
8990 goto end_reconstitute;
8992 i40e_vsi_reset_stats(vsi);
8996 /* create any VEBs attached to this VEB - RECURSION */
8997 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
8998 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
8999 pf->veb[veb_idx]->uplink_seid = veb->seid;
9000 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9011 * i40e_get_capabilities - get info about the HW
9012 * @pf: the PF struct
9013 * @list_type: AQ capability list to query (function or device capabilities)
9014 static int i40e_get_capabilities(struct i40e_pf *pf,
9015 enum i40e_admin_queue_opc list_type)
9017 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9022 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
9024 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9028 /* this loads the data into the hw struct for us */
9029 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9030 &data_size, list_type,
9032 /* data loaded, buffer no longer needed */
9035 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9036 /* retry with a larger buffer */
9037 buf_len = data_size;
9038 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
9039 dev_info(&pf->pdev->dev,
9040 "capability discovery failed, err %s aq_err %s\n",
9041 i40e_stat_str(&pf->hw, err),
9042 i40e_aq_str(&pf->hw,
9043 pf->hw.aq.asq_last_status));
9048 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9049 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9050 dev_info(&pf->pdev->dev,
9051 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9052 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9053 pf->hw.func_caps.num_msix_vectors,
9054 pf->hw.func_caps.num_msix_vectors_vf,
9055 pf->hw.func_caps.fd_filters_guaranteed,
9056 pf->hw.func_caps.fd_filters_best_effort,
9057 pf->hw.func_caps.num_tx_qp,
9058 pf->hw.func_caps.num_vsis);
9059 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9060 dev_info(&pf->pdev->dev,
9061 "switch_mode=0x%04x, function_valid=0x%08x\n",
9062 pf->hw.dev_caps.switch_mode,
9063 pf->hw.dev_caps.valid_functions);
9064 dev_info(&pf->pdev->dev,
9065 "SR-IOV=%d, num_vfs for all function=%u\n",
9066 pf->hw.dev_caps.sr_iov_1_1,
9067 pf->hw.dev_caps.num_vfs);
9068 dev_info(&pf->pdev->dev,
9069 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9070 pf->hw.dev_caps.num_vsis,
9071 pf->hw.dev_caps.num_rx_qp,
9072 pf->hw.dev_caps.num_tx_qp);
9075 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9076 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9077 + pf->hw.func_caps.num_vfs)
9078 if (pf->hw.revision_id == 0 &&
9079 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9080 dev_info(&pf->pdev->dev,
9081 "got num_vsis %d, setting num_vsis to %d\n",
9082 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9083 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
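/* Condensed sketch of the grow-and-retry shape this function uses
 * (demo_* name hypothetical): on I40E_AQ_RC_ENOMEM the firmware reports
 * how much room it needed in data_size, so the buffer is reallocated at
 * that size and the query is issued again.
 */
static int demo_discover_caps(struct i40e_pf *pf,
			      enum i40e_admin_queue_opc list_type)
{
	u16 data_size;
	u16 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	void *buf;

	do {
		buf = kzalloc(buf_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		i40e_aq_discover_capabilities(&pf->hw, buf, buf_len,
					      &data_size, list_type, NULL);
		kfree(buf);
		buf_len = data_size;	/* only used on the ENOMEM retry */
	} while (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM);

	return pf->hw.aq.asq_last_status == I40E_AQ_RC_OK ? 0 : -ENODEV;
}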
9089 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9092 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9093 * @pf: board private structure
9095 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9097 struct i40e_vsi *vsi;
9099 /* quick workaround for an NVM issue that leaves a critical register
9100 * uninitialized
9101 */
9102 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9103 static const u32 hkey[] = {
9104 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9105 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9106 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9110 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9111 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9114 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9117 /* find existing VSI and see if it needs configuring */
9118 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9120 /* create a new VSI if none exists */
9122 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9123 pf->vsi[pf->lan_vsi]->seid, 0);
9125 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9126 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9127 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9132 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9136 * i40e_fdir_teardown - release the Flow Director resources
9137 * @pf: board private structure
9139 static void i40e_fdir_teardown(struct i40e_pf *pf)
9141 struct i40e_vsi *vsi;
9143 i40e_fdir_filter_exit(pf);
9144 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9146 i40e_vsi_release(vsi);
9150 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9151 * @vsi: PF main VSI
9152 * @seid: seid of main or channel VSIs
9154 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9155 * existed before reset
9157 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9159 struct i40e_cloud_filter *cfilter;
9160 struct i40e_pf *pf = vsi->back;
9161 struct hlist_node *node;
9164 /* Add cloud filters back if they exist */
9165 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9167 if (cfilter->seid != seid)
9170 if (cfilter->dst_port)
9171 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9174 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9177 dev_dbg(&pf->pdev->dev,
9178 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9179 i40e_stat_str(&pf->hw, ret),
9180 i40e_aq_str(&pf->hw,
9181 pf->hw.aq.asq_last_status));
9189 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9190 * @vsi: PF main VSI
9192 * Rebuilds channel VSIs if they existed before reset
9194 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9196 struct i40e_channel *ch, *ch_tmp;
9199 if (list_empty(&vsi->ch_list))
9202 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9203 if (!ch->initialized)
9205 /* Proceed with creation of channel (VMDq2) VSI */
9206 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9208 dev_info(&vsi->back->pdev->dev,
9209 "failed to rebuild channels using uplink_seid %u\n",
9213 /* Reconfigure TX queues using QTX_CTL register */
9214 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9216 dev_info(&vsi->back->pdev->dev,
9217 "failed to configure TX rings for channel %u\n",
9221 /* update 'next_base_queue' */
9222 vsi->next_base_queue = vsi->next_base_queue +
9223 ch->num_queue_pairs;
9224 if (ch->max_tx_rate) {
9225 u64 credits = ch->max_tx_rate;
9227 if (i40e_set_bw_limit(vsi, ch->seid,
9231 do_div(credits, I40E_BW_CREDIT_DIVISOR);
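/* Worked example of the credit math: with the 50 Mbps credit granularity
 * implied by the message below, a 400 Mbps max_tx_rate becomes
 * 400 / 50 == 8 scheduler credits of 50 Mbps each.
 */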
9232 dev_dbg(&vsi->back->pdev->dev,
9233 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9238 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9240 dev_dbg(&vsi->back->pdev->dev,
9241 "Failed to rebuild cloud filters for channel VSI %u\n",
9250 * i40e_prep_for_reset - prep for the core to reset
9251 * @pf: board private structure
9252 * @lock_acquired: indicates whether or not the lock has been acquired
9253 * before this function was called.
9255 * Close up the VFs and other things in prep for PF Reset.
9257 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9259 struct i40e_hw *hw = &pf->hw;
9260 i40e_status ret = 0;
9263 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9264 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9266 if (i40e_check_asq_alive(&pf->hw))
9267 i40e_vc_notify_reset(pf);
9269 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9271 /* quiesce the VSIs and their queues that are not already DOWN */
9272 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9275 i40e_pf_quiesce_all_vsi(pf);
9279 for (v = 0; v < pf->num_alloc_vsi; v++) {
9281 pf->vsi[v]->seid = 0;
9284 i40e_shutdown_adminq(&pf->hw);
9286 /* call shutdown HMC */
9287 if (hw->hmc.hmc_obj) {
9288 ret = i40e_shutdown_lan_hmc(hw);
9290 dev_warn(&pf->pdev->dev,
9291 "shutdown_lan_hmc failed: %d\n", ret);
9296 * i40e_send_version - update firmware with driver version
9297 * @pf: PF struct
9299 static void i40e_send_version(struct i40e_pf *pf)
9301 struct i40e_driver_version dv;
9303 dv.major_version = DRV_VERSION_MAJOR;
9304 dv.minor_version = DRV_VERSION_MINOR;
9305 dv.build_version = DRV_VERSION_BUILD;
9306 dv.subbuild_version = 0;
9307 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9308 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9312 * i40e_get_oem_version - get OEM specific version information
9313 * @hw: pointer to the hardware structure
9315 static void i40e_get_oem_version(struct i40e_hw *hw)
9317 u16 block_offset = 0xffff;
9318 u16 block_length = 0;
9319 u16 capabilities = 0;
9323 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9324 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9325 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9326 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9327 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9328 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9329 #define I40E_NVM_OEM_LENGTH 3
9331 /* Check if pointer to OEM version block is valid. */
9332 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9333 if (block_offset == 0xffff)
9336 /* Check if OEM version block has correct length. */
9337 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9339 if (block_length < I40E_NVM_OEM_LENGTH)
9342 /* Check if OEM version format is as expected. */
9343 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9345 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9348 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9350 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9352 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9353 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
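/* Sketch of the inverse of the packing above (demo_* hypothetical,
 * assuming the release word occupies the low 16 bits, as the OR in
 * i40e_get_oem_version suggests).
 */
static inline void demo_unpack_oem_ver(u32 oem_ver, u16 *gen_snap,
				       u16 *release)
{
	*gen_snap = oem_ver >> I40E_OEM_SNAP_SHIFT;
	*release = oem_ver & 0xffff;
}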
9357 * i40e_reset - wait for core reset to finish; reset the PF if a CORER was not seen
9358 * @pf: board private structure
9360 static int i40e_reset(struct i40e_pf *pf)
9362 struct i40e_hw *hw = &pf->hw;
9365 ret = i40e_pf_reset(hw);
9367 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9368 set_bit(__I40E_RESET_FAILED, pf->state);
9369 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9377 * i40e_rebuild - rebuild using a saved config
9378 * @pf: board private structure
9379 * @reinit: if the Main VSI needs to be re-initialized.
9380 * @lock_acquired: indicates whether or not the lock has been acquired
9381 * before this function was called.
9383 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9385 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9386 struct i40e_hw *hw = &pf->hw;
9387 u8 set_fc_aq_fail = 0;
9392 if (test_bit(__I40E_DOWN, pf->state))
9393 goto clear_recovery;
9394 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9396 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9397 ret = i40e_init_adminq(&pf->hw);
9399 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9400 i40e_stat_str(&pf->hw, ret),
9401 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9402 goto clear_recovery;
9404 i40e_get_oem_version(&pf->hw);
9406 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9407 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9408 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9409 /* The following delay is necessary for 4.33 firmware and older
9410 * to recover after an EMP reset. 200 ms should suffice, but we
9411 * use 300 ms to be sure that the FW is ready to operate
9417 /* re-verify the eeprom if we just had an EMP reset */
9418 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9419 i40e_verify_eeprom(pf);
9421 i40e_clear_pxe_mode(hw);
9422 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9424 goto end_core_reset;
9426 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9427 hw->func_caps.num_rx_qp, 0, 0);
9429 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9430 goto end_core_reset;
9432 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9434 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9435 goto end_core_reset;
9438 /* Enable FW to write a default DCB config on link-up */
9439 i40e_aq_set_dcb_parameters(hw, true, NULL);
9441 #ifdef CONFIG_I40E_DCB
9442 ret = i40e_init_pf_dcb(pf);
9444 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9445 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9446 /* Continue without DCB enabled */
9448 #endif /* CONFIG_I40E_DCB */
9449 /* do basic switch setup */
9452 ret = i40e_setup_pf_switch(pf, reinit);
9456 /* The driver only wants link up/down and module qualification
9457 * reports from firmware. Note the negative logic.
9459 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9460 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9461 I40E_AQ_EVENT_MEDIA_NA |
9462 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9464 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9465 i40e_stat_str(&pf->hw, ret),
9466 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9468 /* make sure our flow control settings are restored */
9469 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
9471 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
9472 i40e_stat_str(&pf->hw, ret),
9473 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9475 /* Rebuild the VSIs and VEBs that existed before reset.
9476 * They are still in our local switch element arrays, so only
9477 * need to rebuild the switch model in the HW.
9479 * If there were VEBs but the reconstitution failed, we'll try
9480 * to recover minimal use by getting the basic PF VSI working.
9482 if (vsi->uplink_seid != pf->mac_seid) {
9483 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9484 /* find the one VEB connected to the MAC, and find orphans */
9485 for (v = 0; v < I40E_MAX_VEB; v++) {
9489 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9490 pf->veb[v]->uplink_seid == 0) {
9491 ret = i40e_reconstitute_veb(pf->veb[v]);
9496 /* If Main VEB failed, we're in deep doodoo,
9497 * so give up rebuilding the switch and set up
9498 * for minimal rebuild of PF VSI.
9499 * If orphan failed, we'll report the error
9500 * but try to keep going.
9502 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9503 dev_info(&pf->pdev->dev,
9504 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9506 vsi->uplink_seid = pf->mac_seid;
9508 } else if (pf->veb[v]->uplink_seid == 0) {
9509 dev_info(&pf->pdev->dev,
9510 "rebuild of orphan VEB failed: %d\n",
9517 if (vsi->uplink_seid == pf->mac_seid) {
9518 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9519 /* no VEB, so rebuild only the Main VSI */
9520 ret = i40e_add_vsi(vsi);
9522 dev_info(&pf->pdev->dev,
9523 "rebuild of Main VSI failed: %d\n", ret);
9528 if (vsi->mqprio_qopt.max_rate[0]) {
9529 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9532 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
9533 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9537 credits = max_tx_rate;
9538 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9539 dev_dbg(&vsi->back->pdev->dev,
9540 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9546 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9550 /* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
9551 * for this main VSI if they exist
9553 ret = i40e_rebuild_channels(vsi);
9557 /* Reconfigure hardware for allowing smaller MSS in the case
9558 * of TSO, so that we avoid the MDD being fired and causing
9559 * a reset in the case of small MSS+TSO.
9561 #define I40E_REG_MSS 0x000E64DC
9562 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
9563 #define I40E_64BYTE_MSS 0x400000
9564 val = rd32(hw, I40E_REG_MSS);
9565 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9566 val &= ~I40E_REG_MSS_MIN_MASK;
9567 val |= I40E_64BYTE_MSS;
9568 wr32(hw, I40E_REG_MSS, val);
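/* Worked example of the clamp above (register value hypothetical):
 * I40E_REG_MSS_MIN_MASK (0x3FF0000) covers bits 16-25, and
 * I40E_64BYTE_MSS (0x400000) is 64 << 16, i.e. a 64-byte floor.
 * If the register held 0x0A00000 (a minimum MSS of 160), the test
 * above is true, the field is cleared, and 64 is written back.
 */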
9571 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9573 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9575 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9576 i40e_stat_str(&pf->hw, ret),
9577 i40e_aq_str(&pf->hw,
9578 pf->hw.aq.asq_last_status));
9580 /* reinit the misc interrupt */
9581 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9582 ret = i40e_setup_misc_vector(pf);
9584 /* Add a filter to drop all Flow control frames from any VSI from being
9585 * transmitted. By doing so we stop a malicious VF from sending out
9586 * PAUSE or PFC frames and potentially controlling traffic for other
9587 * VFs/VMs.
9588 * The FW can still send Flow control frames if enabled.
9590 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9593 /* restart the VSIs that were rebuilt and running before the reset */
9594 i40e_pf_unquiesce_all_vsi(pf);
9596 /* Release the RTNL lock before we start resetting VFs */
9600 /* Restore promiscuous settings */
9601 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9603 dev_warn(&pf->pdev->dev,
9604 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9605 pf->cur_promisc ? "on" : "off",
9606 i40e_stat_str(&pf->hw, ret),
9607 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9609 i40e_reset_all_vfs(pf, true);
9611 /* tell the firmware that we're starting */
9612 i40e_send_version(pf);
9614 /* We've already released the lock, so don't do it again */
9615 goto end_core_reset;
9621 clear_bit(__I40E_RESET_FAILED, pf->state);
9623 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9624 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
9628 * i40e_reset_and_rebuild - reset and rebuild using a saved config
9629 * @pf: board private structure
9630 * @reinit: if the Main VSI needs to be re-initialized.
9631 * @lock_acquired: indicates whether or not the lock has been acquired
9632 * before this function was called.
9634 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9638 /* Now we wait for GRST to settle out.
9639 * We don't have to delete the VEBs or VSIs from the hw switch
9640 * because the reset will make them disappear.
9642 ret = i40e_reset(pf);
9644 i40e_rebuild(pf, reinit, lock_acquired);
9648 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
9649 * @pf: board private structure
9651 * @lock_acquired: indicates whether or not the lock has been acquired
9652 * before this function was called.
9653 * Close up the VFs and other things in prep for a Core Reset,
9654 * then get ready to rebuild the world.
9656 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9658 i40e_prep_for_reset(pf, lock_acquired);
9659 i40e_reset_and_rebuild(pf, false, lock_acquired);
9663 * i40e_handle_mdd_event
9664 * @pf: pointer to the PF structure
9666 * Called from the MDD irq handler to identify possibly malicious VFs
9668 static void i40e_handle_mdd_event(struct i40e_pf *pf)
9670 struct i40e_hw *hw = &pf->hw;
9671 bool mdd_detected = false;
9672 bool pf_mdd_detected = false;
9677 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9680 /* find what triggered the MDD event */
9681 reg = rd32(hw, I40E_GL_MDET_TX);
9682 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9683 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9684 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9685 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9686 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9687 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9688 I40E_GL_MDET_TX_EVENT_SHIFT;
9689 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9690 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9691 pf->hw.func_caps.base_queue;
9692 if (netif_msg_tx_err(pf))
9693 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9694 event, queue, pf_num, vf_num);
9695 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9696 mdd_detected = true;
9698 reg = rd32(hw, I40E_GL_MDET_RX);
9699 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9700 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9701 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9702 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9703 I40E_GL_MDET_RX_EVENT_SHIFT;
9704 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9705 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9706 pf->hw.func_caps.base_queue;
9707 if (netif_msg_rx_err(pf))
9708 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9709 event, queue, func);
9710 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9711 mdd_detected = true;
9715 reg = rd32(hw, I40E_PF_MDET_TX);
9716 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9717 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9718 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9719 pf_mdd_detected = true;
9721 reg = rd32(hw, I40E_PF_MDET_RX);
9722 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9723 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9724 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9725 pf_mdd_detected = true;
9727 /* Queue belongs to the PF, initiate a reset */
9728 if (pf_mdd_detected) {
9729 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9730 i40e_service_event_schedule(pf);
9734 /* see if one of the VFs needs its hand slapped */
9735 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9737 reg = rd32(hw, I40E_VP_MDET_TX(i));
9738 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9739 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9740 vf->num_mdd_events++;
9741 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9745 reg = rd32(hw, I40E_VP_MDET_RX(i));
9746 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9747 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9748 vf->num_mdd_events++;
9749 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9753 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9754 dev_info(&pf->pdev->dev,
9755 "Too many MDD events on VF %d, disabled\n", i);
9756 dev_info(&pf->pdev->dev,
9757 "Use PF Control I/F to re-enable the VF\n");
9758 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9762 /* re-enable mdd interrupt cause */
9763 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9764 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9765 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9766 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9770 static const char *i40e_tunnel_name(u8 type)
9771 {
9772 switch (type) {
9773 case UDP_TUNNEL_TYPE_VXLAN: return "vxlan";
9775 case UDP_TUNNEL_TYPE_GENEVE: return "geneve";
9777 default: return "unknown";
9779 }
9780 }
9783 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9784 * @pf: board private structure
9786 static void i40e_sync_udp_filters(struct i40e_pf *pf)
9790 /* loop through and set pending bit for all active UDP filters */
9791 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9792 if (pf->udp_ports[i].port)
9793 pf->pending_udp_bitmap |= BIT_ULL(i);
9796 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
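/* Sketch of the producer half of the handshake between this function and
 * the subtask below (demo_* hypothetical): a per-slot bit in the bitmap
 * says which port entry changed, and the single state bit wakes the
 * service task to process all marked slots.
 */
static void demo_mark_udp_port_pending(struct i40e_pf *pf, u8 slot)
{
	pf->pending_udp_bitmap |= BIT_ULL(slot);
	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}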
9800 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9801 * @pf: board private structure
9803 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9805 struct i40e_hw *hw = &pf->hw;
9806 u8 filter_index, type;
9810 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9813 /* acquire RTNL to maintain state of flags and port requests */
9816 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9817 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9818 struct i40e_udp_port_config *udp_port;
9819 i40e_status ret = 0;
9821 udp_port = &pf->udp_ports[i];
9822 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9824 port = READ_ONCE(udp_port->port);
9825 type = READ_ONCE(udp_port->type);
9826 filter_index = READ_ONCE(udp_port->filter_index);
9828 /* release RTNL while we wait on AQ command */
9832 ret = i40e_aq_add_udp_tunnel(hw, port,
9836 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9837 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9840 /* reacquire RTNL so we can update filter_index */
9844 dev_info(&pf->pdev->dev,
9845 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9846 i40e_tunnel_name(type),
9847 port ? "add" : "delete",
9850 i40e_stat_str(&pf->hw, ret),
9851 i40e_aq_str(&pf->hw,
9852 pf->hw.aq.asq_last_status));
9854 /* failed to add, just reset port,
9855 * drop pending bit for any deletion
9856 */
9857 udp_port->port = 0;
9858 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9861 /* record filter index on success */
9862 udp_port->filter_index = filter_index;
9871 * i40e_service_task - Run the driver's async subtasks
9872 * @work: pointer to work_struct containing our data
9874 static void i40e_service_task(struct work_struct *work)
9876 struct i40e_pf *pf = container_of(work,
9879 unsigned long start_time = jiffies;
9881 /* don't bother with service tasks if a reset is in progress */
9882 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9885 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9888 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
9889 i40e_sync_filters_subtask(pf);
9890 i40e_reset_subtask(pf);
9891 i40e_handle_mdd_event(pf);
9892 i40e_vc_process_vflr_event(pf);
9893 i40e_watchdog_subtask(pf);
9894 i40e_fdir_reinit_subtask(pf);
9895 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
9896 /* Client subtask will reopen next time through. */
9897 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
9899 i40e_client_subtask(pf);
9900 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
9902 i40e_notify_client_of_l2_param_changes(
9903 pf->vsi[pf->lan_vsi]);
9905 i40e_sync_filters_subtask(pf);
9906 i40e_sync_udp_filters_subtask(pf);
9907 i40e_clean_adminq_subtask(pf);
9909 /* flush memory to make sure state is correct before next watchdog */
9910 smp_mb__before_atomic();
9911 clear_bit(__I40E_SERVICE_SCHED, pf->state);
9913 /* If the tasks have taken longer than one timer cycle or there
9914 * is more work to be done, reschedule the service task now
9915 * rather than wait for the timer to tick again.
9917 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
9918 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
9919 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
9920 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
9921 i40e_service_event_schedule(pf);
9925 * i40e_service_timer - timer callback
9926 * @t: pointer to the timer_list embedded in the PF struct
9928 static void i40e_service_timer(struct timer_list *t)
9930 struct i40e_pf *pf = from_timer(pf, t, service_timer);
9932 mod_timer(&pf->service_timer,
9933 round_jiffies(jiffies + pf->service_timer_period));
9934 i40e_service_event_schedule(pf);
9938 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
9939 * @vsi: the VSI being configured
9941 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
9943 struct i40e_pf *pf = vsi->back;
9945 switch (vsi->type) {
9947 vsi->alloc_queue_pairs = pf->num_lan_qps;
9948 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9949 I40E_REQ_DESCRIPTOR_MULTIPLE);
9950 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9951 vsi->num_q_vectors = pf->num_lan_msix;
9953 vsi->num_q_vectors = 1;
9958 vsi->alloc_queue_pairs = 1;
9959 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
9960 I40E_REQ_DESCRIPTOR_MULTIPLE);
9961 vsi->num_q_vectors = pf->num_fdsb_msix;
9964 case I40E_VSI_VMDQ2:
9965 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
9966 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9967 I40E_REQ_DESCRIPTOR_MULTIPLE);
9968 vsi->num_q_vectors = pf->num_vmdq_msix;
9971 case I40E_VSI_SRIOV:
9972 vsi->alloc_queue_pairs = pf->num_vf_qps;
9973 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9974 I40E_REQ_DESCRIPTOR_MULTIPLE);
9986 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
9987 * @vsi: VSI pointer
9988 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
9990 * On error: returns error code (negative)
9991 * On success: returns 0
9993 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
9995 struct i40e_ring **next_rings;
9999 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
10000 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10001 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10002 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10003 if (!vsi->tx_rings)
10005 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10006 if (i40e_enabled_xdp_vsi(vsi)) {
10007 vsi->xdp_rings = next_rings;
10008 next_rings += vsi->alloc_queue_pairs;
10010 vsi->rx_rings = next_rings;
10012 if (alloc_qvectors) {
10013 /* allocate memory for q_vector pointers */
10014 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10015 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10016 if (!vsi->q_vectors) {
10024 kfree(vsi->tx_rings);
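/* Sketch of the carving done above (demo_* hypothetical): one allocation
 * holds the Tx, optional XDP Tx, and Rx pointer arrays back to back, so
 * the single kfree(vsi->tx_rings) in the teardown path frees all three.
 */
static int demo_carve_ring_arrays(struct i40e_vsi *vsi, bool want_xdp)
{
	int per_array = vsi->alloc_queue_pairs;
	struct i40e_ring **p;

	p = kcalloc(per_array * (want_xdp ? 3 : 2), sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	vsi->tx_rings = p;
	p += per_array;
	if (want_xdp) {
		vsi->xdp_rings = p;
		p += per_array;
	}
	vsi->rx_rings = p;
	return 0;
}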
10029 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10030 * @pf: board private structure
10031 * @type: type of VSI
10033 * On error: returns error code (negative)
10034 * On success: returns vsi index in PF (positive)
10036 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10039 struct i40e_vsi *vsi;
10043 /* Need to protect the allocation of the VSIs at the PF level */
10044 mutex_lock(&pf->switch_mutex);
10046 /* VSI list may be fragmented if VSI creation/destruction has
10047 * been happening. We can afford to do a quick scan to look
10048 * for any free VSIs in the list.
10050 * find next empty vsi slot, looping back around if necessary
10053 while (i < pf->num_alloc_vsi && pf->vsi[i])
10055 if (i >= pf->num_alloc_vsi) {
10057 while (i < pf->next_vsi && pf->vsi[i])
10061 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10062 vsi_idx = i; /* Found one! */
10065 goto unlock_pf; /* out of VSI slots! */
10067 pf->next_vsi = ++i;
10069 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10076 set_bit(__I40E_VSI_DOWN, vsi->state);
10078 vsi->idx = vsi_idx;
10079 vsi->int_rate_limit = 0;
10080 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10081 pf->rss_table_size : 64;
10082 vsi->netdev_registered = false;
10083 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10084 hash_init(vsi->mac_filter_hash);
10085 vsi->irqs_ready = false;
10087 ret = i40e_set_num_rings_in_vsi(vsi);
10091 ret = i40e_vsi_alloc_arrays(vsi, true);
10095 /* Setup default MSIX irq handler for VSI */
10096 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10098 /* Initialize VSI lock */
10099 spin_lock_init(&vsi->mac_filter_hash_lock);
10100 pf->vsi[vsi_idx] = vsi;
10105 pf->next_vsi = i - 1;
10108 mutex_unlock(&pf->switch_mutex);
10113 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10114 * @vsi: VSI pointer
10115 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10120 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10122 /* free the ring and vector containers */
10123 if (free_qvectors) {
10124 kfree(vsi->q_vectors);
10125 vsi->q_vectors = NULL;
10127 kfree(vsi->tx_rings);
10128 vsi->tx_rings = NULL;
10129 vsi->rx_rings = NULL;
10130 vsi->xdp_rings = NULL;
10134 * i40e_clear_rss_config_user - clear the user-configured RSS hash keys and LUT
10136 * @vsi: Pointer to VSI structure
10138 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10143 kfree(vsi->rss_hkey_user);
10144 vsi->rss_hkey_user = NULL;
10146 kfree(vsi->rss_lut_user);
10147 vsi->rss_lut_user = NULL;
10151 * i40e_vsi_clear - Deallocate the VSI provided
10152 * @vsi: the VSI being un-configured
10154 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10156 struct i40e_pf *pf;
10165 mutex_lock(&pf->switch_mutex);
10166 if (!pf->vsi[vsi->idx]) {
10167 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10168 vsi->idx, vsi->idx, vsi->type);
10172 if (pf->vsi[vsi->idx] != vsi) {
10173 dev_err(&pf->pdev->dev,
10174 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10175 pf->vsi[vsi->idx]->idx,
10176 pf->vsi[vsi->idx]->type,
10177 vsi->idx, vsi->type);
10181 /* updates the PF for this cleared vsi */
10182 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10183 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10185 i40e_vsi_free_arrays(vsi, true);
10186 i40e_clear_rss_config_user(vsi);
10188 pf->vsi[vsi->idx] = NULL;
10189 if (vsi->idx < pf->next_vsi)
10190 pf->next_vsi = vsi->idx;
10193 mutex_unlock(&pf->switch_mutex);
10201 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10202 * @vsi: the VSI being cleaned
10204 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10208 if (vsi->tx_rings && vsi->tx_rings[0]) {
10209 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10210 kfree_rcu(vsi->tx_rings[i], rcu);
10211 vsi->tx_rings[i] = NULL;
10212 vsi->rx_rings[i] = NULL;
10213 if (vsi->xdp_rings)
10214 vsi->xdp_rings[i] = NULL;
10220 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10221 * @vsi: the VSI being configured
10223 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10225 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10226 struct i40e_pf *pf = vsi->back;
10227 struct i40e_ring *ring;
10229 /* Set basic values in the rings to be used later during open() */
10230 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10231 /* allocate space for Tx, Rx and (optionally) XDP Tx rings in one shot */
10232 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10236 ring->queue_index = i;
10237 ring->reg_idx = vsi->base_queue + i;
10238 ring->ring_active = false;
10240 ring->netdev = vsi->netdev;
10241 ring->dev = &pf->pdev->dev;
10242 ring->count = vsi->num_desc;
10245 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10246 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10247 ring->itr_setting = pf->tx_itr_default;
10248 vsi->tx_rings[i] = ring++;
10250 if (!i40e_enabled_xdp_vsi(vsi))
10253 ring->queue_index = vsi->alloc_queue_pairs + i;
10254 ring->reg_idx = vsi->base_queue + ring->queue_index;
10255 ring->ring_active = false;
10257 ring->netdev = NULL;
10258 ring->dev = &pf->pdev->dev;
10259 ring->count = vsi->num_desc;
10262 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10263 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10264 set_ring_xdp(ring);
10265 ring->itr_setting = pf->tx_itr_default;
10266 vsi->xdp_rings[i] = ring++;
10269 ring->queue_index = i;
10270 ring->reg_idx = vsi->base_queue + i;
10271 ring->ring_active = false;
10273 ring->netdev = vsi->netdev;
10274 ring->dev = &pf->pdev->dev;
10275 ring->count = vsi->num_desc;
10278 ring->itr_setting = pf->rx_itr_default;
10279 vsi->rx_rings[i] = ring;
10285 i40e_vsi_clear_rings(vsi);
10290 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10291 * @pf: board private structure
10292 * @vectors: the number of MSI-X vectors to request
10294 * Returns the number of vectors reserved, or error
10296 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10298 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10299 I40E_MIN_MSIX, vectors);
10301 dev_info(&pf->pdev->dev,
10302 "MSI-X vector reservation failed: %d\n", vectors);
10310 * i40e_init_msix - Setup the MSIX capability
10311 * @pf: board private structure
10313 * Work with the OS to set up the MSIX vectors needed.
10315 * Returns the number of vectors reserved or negative on failure
10317 static int i40e_init_msix(struct i40e_pf *pf)
10319 struct i40e_hw *hw = &pf->hw;
10320 int cpus, extra_vectors;
10324 int iwarp_requested = 0;
10326 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10329 /* The number of vectors we'll request will consist of:
10330 * - Add 1 for "other" cause for Admin Queue events, etc.
10331 * - The number of LAN queue pairs
10332 * - Queues being used for RSS.
10333 * We don't need as many as max_rss_size vectors;
10334 * use rss_size instead in the calculation since that
10335 * is governed by number of cpus in the system.
10336 * - assumes symmetric Tx/Rx pairing
10337 * - The number of VMDq pairs
10338 * - The CPU count within the NUMA node if iWARP is enabled
10339 * Once we count this up, try the request.
10341 * If we can't get what we want, we'll simplify to nearly nothing
10342 * and try again. If that still fails, we punt.
10344 vectors_left = hw->func_caps.num_msix_vectors;
10347 /* reserve one vector for miscellaneous handler */
10348 if (vectors_left) {
10353 /* reserve some vectors for the main PF traffic queues. Initially we
10354 * only reserve at most 50% of the available vectors, in the case that
10355 * the number of online CPUs is large. This ensures that we can enable
10356 * extra features as well. Once we've enabled the other features, we
10357 * will use any remaining vectors to reach as close as we can to the
10358 * number of online CPUs.
10360 cpus = num_online_cpus();
10361 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10362 vectors_left -= pf->num_lan_msix;
10364 /* reserve one vector for sideband flow director */
10365 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10366 if (vectors_left) {
10367 pf->num_fdsb_msix = 1;
10371 pf->num_fdsb_msix = 0;
10375 /* can we reserve enough for iWARP? */
10376 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10377 iwarp_requested = pf->num_iwarp_msix;
10380 pf->num_iwarp_msix = 0;
10381 else if (vectors_left < pf->num_iwarp_msix)
10382 pf->num_iwarp_msix = 1;
10383 v_budget += pf->num_iwarp_msix;
10384 vectors_left -= pf->num_iwarp_msix;
10387 /* any vectors left over go for VMDq support */
10388 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10389 if (!vectors_left) {
10390 pf->num_vmdq_msix = 0;
10391 pf->num_vmdq_qps = 0;
10393 int vmdq_vecs_wanted =
10394 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10396 min_t(int, vectors_left, vmdq_vecs_wanted);
10398 /* if we're short on vectors for what's desired, we limit
10399 * the queues per vmdq. If this is still more than are
10400 * available, the user will need to change the number of
10401 * queues/vectors used by the PF later with the ethtool
10402 * channels command.
10404 if (vectors_left < vmdq_vecs_wanted) {
10405 pf->num_vmdq_qps = 1;
10406 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10407 vmdq_vecs = min_t(int,
10411 pf->num_vmdq_msix = pf->num_vmdq_qps;
10413 v_budget += vmdq_vecs;
10414 vectors_left -= vmdq_vecs;
10418 /* On systems with a large number of SMP cores, we previously limited
10419 * the number of vectors for num_lan_msix to be at most 50% of the
10420 * available vectors, to allow for other features. Now, we add back
10421 * the remaining vectors. However, we ensure that the total
10422 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10423 * calculate the number of vectors we can add without going over the
10424 * cap of CPUs. For systems with a small number of CPUs this will be
10427 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10428 pf->num_lan_msix += extra_vectors;
10429 vectors_left -= extra_vectors;
10431 WARN(vectors_left < 0,
10432 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10434 v_budget += pf->num_lan_msix;
10435 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10437 if (!pf->msix_entries)
10440 for (i = 0; i < v_budget; i++)
10441 pf->msix_entries[i].entry = i;
10442 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10444 if (v_actual < I40E_MIN_MSIX) {
10445 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10446 kfree(pf->msix_entries);
10447 pf->msix_entries = NULL;
10448 pci_disable_msix(pf->pdev);
10451 } else if (v_actual == I40E_MIN_MSIX) {
10452 /* Adjust for minimal MSIX use */
10453 pf->num_vmdq_vsis = 0;
10454 pf->num_vmdq_qps = 0;
10455 pf->num_lan_qps = 1;
10456 pf->num_lan_msix = 1;
10458 } else if (v_actual != v_budget) {
10459 /* If we have limited resources, we will start with no vectors
10460 * for the special features and then allocate vectors to some
10461 * of these features based on the policy and at the end disable
10462 * the features that did not get any vectors.
10466 dev_info(&pf->pdev->dev,
10467 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10468 v_actual, v_budget);
10469 /* reserve the misc vector */
10470 vec = v_actual - 1;
10472 /* Scale vector usage down */
10473 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
10474 pf->num_vmdq_vsis = 1;
10475 pf->num_vmdq_qps = 1;
10477 /* partition out the remaining vectors */
10480 pf->num_lan_msix = 1;
10483 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10484 pf->num_lan_msix = 1;
10485 pf->num_iwarp_msix = 1;
10487 pf->num_lan_msix = 2;
10491 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10492 pf->num_iwarp_msix = min_t(int, (vec / 3),
10494 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10495 I40E_DEFAULT_NUM_VMDQ_VSI);
10497 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10498 I40E_DEFAULT_NUM_VMDQ_VSI);
10500 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10501 pf->num_fdsb_msix = 1;
10504 pf->num_lan_msix = min_t(int,
10505 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10507 pf->num_lan_qps = pf->num_lan_msix;
10512 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10513 (pf->num_fdsb_msix == 0)) {
10514 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10515 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10516 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10518 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10519 (pf->num_vmdq_msix == 0)) {
10520 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10521 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10524 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10525 (pf->num_iwarp_msix == 0)) {
10526 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10527 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10529 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10530 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10532 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10534 pf->num_iwarp_msix);
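/* Condensed sketch of the two-phase LAN budget described in the comments
 * above (demo_* hypothetical): phase 1 caps the LAN share at half the
 * pool and at the CPU count; phase 2, after the other features have
 * taken their vectors, tops LAN back up toward the CPU count.
 */
static void demo_budget_lan_vectors(int cpus, int pool, int *lan, int *left)
{
	int extra;

	*lan = min(cpus, pool / 2);		/* phase 1: at most 50% */
	*left = pool - *lan;
	/* ... FDSB/iWARP/VMDq would consume *left here ... */
	extra = min(cpus - *lan, *left);	/* phase 2: add back */
	*lan += extra;
	*left -= extra;
}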
10540 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
10541 * @vsi: the VSI being configured
10542 * @v_idx: index of the vector in the vsi struct
10543 * @cpu: cpu to be used on affinity_mask
10545 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10547 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10549 struct i40e_q_vector *q_vector;
10551 /* allocate q_vector */
10552 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10556 q_vector->vsi = vsi;
10557 q_vector->v_idx = v_idx;
10558 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10561 netif_napi_add(vsi->netdev, &q_vector->napi,
10562 i40e_napi_poll, NAPI_POLL_WEIGHT);
10564 /* tie q_vector and vsi together */
10565 vsi->q_vectors[v_idx] = q_vector;
10571 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
10572 * @vsi: the VSI being configured
10574 * We allocate one q_vector per queue interrupt. If allocation fails we
10575 * return -ENOMEM.
10577 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10579 struct i40e_pf *pf = vsi->back;
10580 int err, v_idx, num_q_vectors, current_cpu;
10582 /* if not MSIX, give the one vector only to the LAN VSI */
10583 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10584 num_q_vectors = vsi->num_q_vectors;
10585 else if (vsi == pf->vsi[pf->lan_vsi])
10590 current_cpu = cpumask_first(cpu_online_mask);
10592 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10593 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10596 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10597 if (unlikely(current_cpu >= nr_cpu_ids))
10598 current_cpu = cpumask_first(cpu_online_mask);
10605 i40e_free_q_vector(vsi, v_idx);
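/* Sketch of the CPU walk used in the loop above (demo_* hypothetical):
 * one online CPU per vector, wrapping to the first online CPU when
 * cpumask_next() runs past nr_cpu_ids.
 */
static int demo_next_affinity_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	return cpu;
}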
10611 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10612 * @pf: board private structure to initialize
10614 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10619 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10620 vectors = i40e_init_msix(pf);
10622 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10623 I40E_FLAG_IWARP_ENABLED |
10624 I40E_FLAG_RSS_ENABLED |
10625 I40E_FLAG_DCB_CAPABLE |
10626 I40E_FLAG_DCB_ENABLED |
10627 I40E_FLAG_SRIOV_ENABLED |
10628 I40E_FLAG_FD_SB_ENABLED |
10629 I40E_FLAG_FD_ATR_ENABLED |
10630 I40E_FLAG_VMDQ_ENABLED);
10631 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10633 /* rework the queue expectations without MSIX */
10634 i40e_determine_queue_usage(pf);
10638 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10639 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10640 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10641 vectors = pci_enable_msi(pf->pdev);
10643 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10645 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10647 vectors = 1; /* one MSI or Legacy vector */
10650 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10651 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10653 /* set up vector assignment tracking */
10654 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10655 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10659 pf->irq_pile->num_entries = vectors;
10660 pf->irq_pile->search_hint = 0;
10662 /* track first vector for misc interrupts, ignore return */
10663 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
10669 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10670 * @pf: private board data structure
10672 * Restore the interrupt scheme that was cleared when we suspended the
10673 * device. This should be called during resume to re-allocate the q_vectors
10674 * and reacquire IRQs.
10676 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10680 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10681 * scheme. We need to re-enable them here in order to attempt to
10682 * re-acquire the MSI or MSI-X vectors
10684 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10686 err = i40e_init_interrupt_scheme(pf);
10690 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10691 * rings together again.
10693 for (i = 0; i < pf->num_alloc_vsi; i++) {
10695 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10698 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10702 err = i40e_setup_misc_vector(pf);
10706 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
10707 i40e_client_update_msix_info(pf);
10714 i40e_vsi_free_q_vectors(pf->vsi[i]);
10721 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10722 * @pf: board private structure
10724 * This sets up the handler for MSIX 0, which is used to manage the
10725 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10726 * when in MSI or Legacy interrupt mode.
10728 static int i40e_setup_misc_vector(struct i40e_pf *pf)
10729 {
10730 struct i40e_hw *hw = &pf->hw;
10731 int err = 0;
10733 /* Only request the IRQ once, the first time through. */
10734 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10735 err = request_irq(pf->msix_entries[0].vector,
10736 i40e_intr, 0, pf->int_name, pf);
10737 if (err) {
10738 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10739 dev_info(&pf->pdev->dev,
10740 "request_irq for %s failed: %d\n",
10741 pf->int_name, err);
10742 return -EFAULT;
10743 }
10744 }
10746 i40e_enable_misc_int_causes(pf);
10748 /* associate no queues to the misc vector */
10749 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10750 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
10752 i40e_flush(hw);
10754 i40e_irq_dynamic_enable_icr0(pf);
10756 return err;
10757 }
10760 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10761 * @vsi: Pointer to vsi structure
10762 * @seed: Buffer to store the hash keys
10763 * @lut: Buffer to store the lookup table entries
10764 * @lut_size: Size of buffer to store the lookup table entries
10766 * Return 0 on success, negative on failure
10768 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10769 u8 *lut, u16 lut_size)
10770 {
10771 struct i40e_pf *pf = vsi->back;
10772 struct i40e_hw *hw = &pf->hw;
10773 int ret = 0;
10775 if (seed) {
10776 ret = i40e_aq_get_rss_key(hw, vsi->id,
10777 (struct i40e_aqc_get_set_rss_key_data *)seed);
10778 if (ret) {
10779 dev_info(&pf->pdev->dev,
10780 "Cannot get RSS key, err %s aq_err %s\n",
10781 i40e_stat_str(&pf->hw, ret),
10782 i40e_aq_str(&pf->hw,
10783 pf->hw.aq.asq_last_status));
10784 return ret;
10785 }
10786 }
10788 if (lut) {
10789 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
10791 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10792 if (ret) {
10793 dev_info(&pf->pdev->dev,
10794 "Cannot get RSS lut, err %s aq_err %s\n",
10795 i40e_stat_str(&pf->hw, ret),
10796 i40e_aq_str(&pf->hw,
10797 pf->hw.aq.asq_last_status));
10798 return ret;
10799 }
10800 }
10802 return ret;
10803 }
10806 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
10807 * @vsi: Pointer to vsi structure
10808 * @seed: RSS hash seed
10809 * @lut: Lookup table
10810 * @lut_size: Lookup table size
10812 * Returns 0 on success, negative on failure
10814 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10815 const u8 *lut, u16 lut_size)
10816 {
10817 struct i40e_pf *pf = vsi->back;
10818 struct i40e_hw *hw = &pf->hw;
10819 u16 vf_id = vsi->vf_id;
10820 u8 i;
10822 /* Fill out hash function seed */
10823 if (seed) {
10824 u32 *seed_dw = (u32 *)seed;
10826 if (vsi->type == I40E_VSI_MAIN) {
10827 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10828 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10829 } else if (vsi->type == I40E_VSI_SRIOV) {
10830 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10831 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10832 } else {
10833 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10834 }
10835 }
10837 if (lut) {
10838 u32 *lut_dw = (u32 *)lut;
10840 if (vsi->type == I40E_VSI_MAIN) {
10841 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10842 return -EINVAL;
10843 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10844 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10845 } else if (vsi->type == I40E_VSI_SRIOV) {
10846 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10847 return -EINVAL;
10848 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10849 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10850 } else {
10851 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10852 }
10853 }
10854 i40e_flush(hw);
10856 return 0;
10857 }
10860 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10861 * @vsi: Pointer to VSI structure
10862 * @seed: Buffer to store the keys
10863 * @lut: Buffer to store the lookup table entries
10864 * @lut_size: Size of buffer to store the lookup table entries
10866 * Returns 0 on success, negative on failure
10868 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10869 u8 *lut, u16 lut_size)
10870 {
10871 struct i40e_pf *pf = vsi->back;
10872 struct i40e_hw *hw = &pf->hw;
10873 u16 i;
10875 if (seed) {
10876 u32 *seed_dw = (u32 *)seed;
10878 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10879 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
10880 }
10881 if (lut) {
10882 u32 *lut_dw = (u32 *)lut;
10884 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10885 return -EINVAL;
10886 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10887 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
10888 }
10890 return 0;
10891 }
10894 * i40e_config_rss - Configure RSS keys and lut
10895 * @vsi: Pointer to VSI structure
10896 * @seed: RSS hash seed
10897 * @lut: Lookup table
10898 * @lut_size: Lookup table size
10900 * Returns 0 on success, negative on failure
10902 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10903 {
10904 struct i40e_pf *pf = vsi->back;
10906 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10907 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
10909 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
10910 }
10913 * i40e_get_rss - Get RSS keys and lut
10914 * @vsi: Pointer to VSI structure
10915 * @seed: Buffer to store the keys
10916 * @lut: Buffer to store the lookup table entries
10917 * @lut_size: Size of buffer to store the lookup table entries
10919 * Returns 0 on success, negative on failure
10921 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10922 {
10923 struct i40e_pf *pf = vsi->back;
10925 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10926 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
10928 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
10929 }
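/* Editor's note: a hedged usage sketch for the dispatch above (helper
 * name hypothetical, not driver code).  On parts with
 * I40E_HW_RSS_AQ_CAPABLE the key/LUT travel over the admin queue;
 * everywhere else they are read straight from the PFQF registers.
 */
#if 0
static void i40e_show_rss_example(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 lut[I40E_HLUT_ARRAY_SIZE];

	if (!i40e_get_rss(vsi, seed, lut, sizeof(lut)))
		pr_info("LUT slot 0 maps to queue %u\n", lut[0]);
}
#endif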
10932 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
10933 * @pf: Pointer to board private structure
10934 * @lut: Lookup table
10935 * @rss_table_size: Lookup table size
10936 * @rss_size: Range of queue number for hashing
10938 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
10939 u16 rss_table_size, u16 rss_size)
10940 {
10941 u16 i;
10943 for (i = 0; i < rss_table_size; i++)
10944 lut[i] = i % rss_size;
10945 }
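/* Editor's note: the modulo fill above distributes LUT slots round-robin
 * across the enabled queues.  With rss_table_size = 8 and rss_size = 3
 * the table becomes { 0, 1, 2, 0, 1, 2, 0, 1 }.  Illustrative check only
 * (hypothetical helper, not driver code):
 */
#if 0
static void i40e_fill_rss_lut_example(struct i40e_pf *pf)
{
	u8 lut[8];
	u16 i;

	i40e_fill_rss_lut(pf, lut, 8, 3);
	for (i = 0; i < 8; i++)
		WARN_ON(lut[i] != i % 3);
}
#endif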
10948 * i40e_pf_config_rss - Prepare for RSS if used
10949 * @pf: board private structure
10951 static int i40e_pf_config_rss(struct i40e_pf *pf)
10952 {
10953 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10954 u8 seed[I40E_HKEY_ARRAY_SIZE];
10955 u8 *lut;
10956 struct i40e_hw *hw = &pf->hw;
10957 u32 reg_val;
10958 u64 hena;
10959 int ret;
10961 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
10962 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
10963 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
10964 hena |= i40e_pf_get_default_rss_hena(pf);
10966 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
10967 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
10969 /* Determine the RSS table size based on the hardware capabilities */
10970 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
10971 reg_val = (pf->rss_table_size == 512) ?
10972 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
10973 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
10974 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
10976 /* Determine the RSS size of the VSI */
10977 if (!vsi->rss_size) {
10978 u16 qcount;
10979 /* If the firmware does something weird during VSI init, we
10980 * could end up with zero TCs. Check for that to avoid
10981 * divide-by-zero. It probably won't pass traffic, but it also
10982 * won't crash.
10983 */
10984 qcount = vsi->num_queue_pairs /
10985 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
10986 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
10987 }
10988 if (!vsi->rss_size)
10989 return -EINVAL;
10991 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
10992 if (!lut)
10993 return -ENOMEM;
10995 /* Use user configured lut if there is one, otherwise use default */
10996 if (vsi->rss_lut_user)
10997 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
10998 else
10999 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11001 /* Use user configured hash key if there is one, otherwise
11002 * use default.
11003 */
11004 if (vsi->rss_hkey_user)
11005 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11006 else
11007 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11008 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11009 kfree(lut);
11011 return ret;
11012 }
11015 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11016 * @pf: board private structure
11017 * @queue_count: the requested queue count for rss.
11019 * returns 0 if rss is not enabled, if enabled returns the final rss queue
11020 * count which may be different from the requested queue count.
11021 * Note: expects to be called while under rtnl_lock()
11023 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11024 {
11025 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11026 int new_rss_size;
11028 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11029 return 0;
11031 queue_count = min_t(int, queue_count, num_online_cpus());
11032 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11034 if (queue_count != vsi->num_queue_pairs) {
11035 u16 qcount;
11037 vsi->req_queue_pairs = queue_count;
11038 i40e_prep_for_reset(pf, true);
11040 pf->alloc_rss_size = new_rss_size;
11042 i40e_reset_and_rebuild(pf, true, true);
11044 /* Discard the user configured hash keys and lut, if less
11045 * queues are enabled.
11046 */
11047 if (queue_count < vsi->rss_size) {
11048 i40e_clear_rss_config_user(vsi);
11049 dev_dbg(&pf->pdev->dev,
11050 "discard user configured hash keys and lut\n");
11051 }
11053 /* Reset vsi->rss_size, as number of enabled queues changed */
11054 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
11055 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11057 i40e_pf_config_rss(pf);
11058 }
11059 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11060 vsi->req_queue_pairs, pf->rss_size_max);
11061 return pf->alloc_rss_size;
11062 }
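/* Editor's note: worked example for the rss_size recomputation above,
 * with hypothetical numbers: after a rebuild to 16 queue pairs across
 * 4 TCs, qcount = 16 / 4 = 4, so even an alloc_rss_size of 8 is clamped
 * to 4.  Illustrative only, not driver code:
 */
#if 0
static u16 i40e_rss_size_example(void)
{
	u16 num_queue_pairs = 16, numtc = 4, alloc_rss_size = 8;
	u16 qcount = num_queue_pairs / numtc;

	return min_t(u16, alloc_rss_size, qcount);	/* == 4 */
}
#endif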
11065 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11066 * @pf: board private structure
11068 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11069 {
11070 i40e_status status;
11071 bool min_valid, max_valid;
11072 u32 max_bw, min_bw;
11074 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11075 &min_valid, &max_valid);
11077 if (!status) {
11078 if (min_valid)
11079 pf->min_bw = min_bw;
11080 if (max_valid)
11081 pf->max_bw = max_bw;
11082 }
11084 return status;
11085 }
11088 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11089 * @pf: board private structure
11091 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11092 {
11093 struct i40e_aqc_configure_partition_bw_data bw_data;
11094 i40e_status status;
11096 /* Set the valid bit for this PF */
11097 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11098 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11099 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11101 /* Set the new bandwidths */
11102 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
11104 return status;
11105 }
11108 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11109 * @pf: board private structure
11111 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11112 {
11113 /* Commit temporary BW setting to permanent NVM image */
11114 enum i40e_admin_queue_err last_aq_status;
11115 i40e_status ret;
11116 u16 nvm_word;
11118 if (pf->hw.partition_id != 1) {
11119 dev_info(&pf->pdev->dev,
11120 "Commit BW only works on partition 1! This is partition %d",
11121 pf->hw.partition_id);
11122 ret = I40E_NOT_SUPPORTED;
11123 goto bw_commit_out;
11124 }
11126 /* Acquire NVM for read access */
11127 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11128 last_aq_status = pf->hw.aq.asq_last_status;
11129 if (ret) {
11130 dev_info(&pf->pdev->dev,
11131 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11132 i40e_stat_str(&pf->hw, ret),
11133 i40e_aq_str(&pf->hw, last_aq_status));
11134 goto bw_commit_out;
11135 }
11137 /* Read word 0x10 of NVM - SW compatibility word 1 */
11138 ret = i40e_aq_read_nvm(&pf->hw,
11139 I40E_SR_NVM_CONTROL_WORD,
11140 0x10, sizeof(nvm_word), &nvm_word,
11141 false, NULL);
11142 /* Save off last admin queue command status before releasing
11143 * the NVM
11144 */
11145 last_aq_status = pf->hw.aq.asq_last_status;
11146 i40e_release_nvm(&pf->hw);
11147 if (ret) {
11148 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11149 i40e_stat_str(&pf->hw, ret),
11150 i40e_aq_str(&pf->hw, last_aq_status));
11151 goto bw_commit_out;
11152 }
11154 /* Wait a bit for NVM release to complete */
11155 msleep(50);
11157 /* Acquire NVM for write access */
11158 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11159 last_aq_status = pf->hw.aq.asq_last_status;
11160 if (ret) {
11161 dev_info(&pf->pdev->dev,
11162 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11163 i40e_stat_str(&pf->hw, ret),
11164 i40e_aq_str(&pf->hw, last_aq_status));
11165 goto bw_commit_out;
11166 }
11167 /* Write it back out unchanged to initiate update NVM,
11168 * which will force a write of the shadow (alt) RAM to
11169 * the NVM - thus storing the bandwidth values permanently.
11170 */
11171 ret = i40e_aq_update_nvm(&pf->hw,
11172 I40E_SR_NVM_CONTROL_WORD,
11173 0x10, sizeof(nvm_word),
11174 &nvm_word, true, 0, NULL);
11175 /* Save off last admin queue command status before releasing
11176 * the NVM
11177 */
11178 last_aq_status = pf->hw.aq.asq_last_status;
11179 i40e_release_nvm(&pf->hw);
11180 if (ret)
11181 dev_info(&pf->pdev->dev,
11182 "BW settings NOT SAVED, err %s aq_err %s\n",
11183 i40e_stat_str(&pf->hw, ret),
11184 i40e_aq_str(&pf->hw, last_aq_status));
11185 bw_commit_out:
11187 return ret;
11188 }
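/* Editor's note: condensed sketch of the NVM discipline used above -
 * acquire, run the AQ command, latch asq_last_status *before* releasing
 * (release issues its own AQ traffic and would overwrite it), then
 * release.  Error paths trimmed; hypothetical helper, not driver code.
 */
#if 0
static i40e_status i40e_nvm_read_pattern(struct i40e_pf *pf, u16 *word)
{
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;

	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	if (ret)
		return ret;
	ret = i40e_aq_read_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(*word), word, false, NULL);
	last_aq_status = pf->hw.aq.asq_last_status;	/* sample first */
	i40e_release_nvm(&pf->hw);
	return ret;
}
#endif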
11191 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11192 * @pf: board private structure to initialize
11194 * i40e_sw_init initializes the Adapter private data structure.
11195 * Fields are initialized based on PCI device information and
11196 * OS network device settings (MTU size).
11198 static int i40e_sw_init(struct i40e_pf *pf)
11199 {
11200 int err = 0;
11201 int size;
11203 /* Set default capability flags */
11204 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11205 I40E_FLAG_MSI_ENABLED |
11206 I40E_FLAG_MSIX_ENABLED;
11208 /* Set default ITR */
11209 pf->rx_itr_default = I40E_ITR_RX_DEF;
11210 pf->tx_itr_default = I40E_ITR_TX_DEF;
11212 /* Depending on PF configurations, it is possible that the RSS
11213 * maximum might end up larger than the available queues
11215 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11216 pf->alloc_rss_size = 1;
11217 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11218 pf->rss_size_max = min_t(int, pf->rss_size_max,
11219 pf->hw.func_caps.num_tx_qp);
11220 if (pf->hw.func_caps.rss) {
11221 pf->flags |= I40E_FLAG_RSS_ENABLED;
11222 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11223 num_online_cpus());
11224 }
11226 /* MFP mode enabled */
11227 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11228 pf->flags |= I40E_FLAG_MFP_ENABLED;
11229 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11230 if (i40e_get_partition_bw_setting(pf)) {
11231 dev_warn(&pf->pdev->dev,
11232 "Could not get partition bw settings\n");
11233 } else {
11234 dev_info(&pf->pdev->dev,
11235 "Partition BW Min = %8.8x, Max = %8.8x\n",
11236 pf->min_bw, pf->max_bw);
11238 /* nudge the Tx scheduler */
11239 i40e_set_partition_bw_setting(pf);
11240 }
11241 }
11243 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11244 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11245 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11246 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11247 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11248 pf->hw.num_partitions > 1)
11249 dev_info(&pf->pdev->dev,
11250 "Flow Director Sideband mode Disabled in MFP mode\n");
11251 else
11252 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11253 pf->fdir_pf_filter_count =
11254 pf->hw.func_caps.fd_filters_guaranteed;
11255 pf->hw.fdir_shared_filter_count =
11256 pf->hw.func_caps.fd_filters_best_effort;
11257 }
11259 if (pf->hw.mac.type == I40E_MAC_X722) {
11260 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11261 I40E_HW_128_QP_RSS_CAPABLE |
11262 I40E_HW_ATR_EVICT_CAPABLE |
11263 I40E_HW_WB_ON_ITR_CAPABLE |
11264 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11265 I40E_HW_NO_PCI_LINK_CHECK |
11266 I40E_HW_USE_SET_LLDP_MIB |
11267 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11268 I40E_HW_PTP_L4_CAPABLE |
11269 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11270 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11272 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11273 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11274 I40E_FDEVICT_PCTYPE_DEFAULT) {
11275 dev_warn(&pf->pdev->dev,
11276 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11277 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11278 }
11279 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11280 ((pf->hw.aq.api_maj_ver == 1) &&
11281 (pf->hw.aq.api_min_ver > 4))) {
11282 /* Supported in FW API version higher than 1.4 */
11283 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11284 }
11286 /* Enable HW ATR eviction if possible */
11287 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11288 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11290 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11291 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11292 (pf->hw.aq.fw_maj_ver < 4))) {
11293 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11294 /* No DCB support for FW < v4.33 */
11295 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11296 }
11298 /* Disable FW LLDP if FW < v4.3 */
11299 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11300 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11301 (pf->hw.aq.fw_maj_ver < 4)))
11302 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11304 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11305 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11306 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11307 (pf->hw.aq.fw_maj_ver >= 5)))
11308 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11310 /* Enable PTP L4 if FW > v6.0 */
11311 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11312 pf->hw.aq.fw_maj_ver >= 6)
11313 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11315 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11316 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11317 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11318 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11319 }
11321 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11322 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11323 /* IWARP needs one extra vector for CQP just like MISC.*/
11324 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11325 }
11326 /* Stopping FW LLDP engine is supported on XL710 and X722
11327 * starting from FW versions determined in i40e_init_adminq.
11328 * Stopping the FW LLDP engine is not supported on XL710
11329 * if NPAR is functioning so unset this hw flag in this case.
11331 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11332 pf->hw.func_caps.npar_enable &&
11333 (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
11334 pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
11336 #ifdef CONFIG_PCI_IOV
11337 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11338 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11339 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11340 pf->num_req_vfs = min_t(int,
11341 pf->hw.func_caps.num_vfs,
11342 I40E_MAX_VF_COUNT);
11343 }
11344 #endif /* CONFIG_PCI_IOV */
11345 pf->eeprom_version = 0xDEAD;
11346 pf->lan_veb = I40E_NO_VEB;
11347 pf->lan_vsi = I40E_NO_VSI;
11349 /* By default FW has this off for performance reasons */
11350 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11352 /* set up queue assignment tracking */
11353 size = sizeof(struct i40e_lump_tracking)
11354 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11355 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11356 if (!pf->qp_pile) {
11357 err = -ENOMEM;
11358 goto sw_init_done;
11359 }
11360 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11361 pf->qp_pile->search_hint = 0;
11363 pf->tx_timeout_recovery_level = 1;
11365 mutex_init(&pf->switch_mutex);
11367 sw_init_done:
11368 return err;
11369 }
11372 * i40e_set_ntuple - set the ntuple feature flag and take action
11373 * @pf: board private structure to initialize
11374 * @features: the feature set that the stack is suggesting
11376 * returns a bool to indicate if reset needs to happen
11378 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11379 {
11380 bool need_reset = false;
11382 /* Check if Flow Director n-tuple support was enabled or disabled. If
11383 * the state changed, we need to reset.
11384 */
11385 if (features & NETIF_F_NTUPLE) {
11386 /* Enable filters and mark for reset */
11387 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11388 need_reset = true;
11389 /* enable FD_SB only if there is MSI-X vector and no cloud
11390 * filters exist
11391 */
11392 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11393 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11394 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11395 }
11396 } else {
11397 /* turn off filters, mark for reset and clear SW filter list */
11398 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11399 need_reset = true;
11400 i40e_fdir_filter_exit(pf);
11401 }
11402 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11403 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
11404 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11406 /* reset fd counters */
11407 pf->fd_add_err = 0;
11408 pf->fd_atr_cnt = 0;
11409 /* if ATR was auto disabled it can be re-enabled. */
11410 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
11411 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11412 (I40E_DEBUG_FD & pf->hw.debug_mask))
11413 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
11414 }
11415 return need_reset;
11416 }
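/* Editor's note: the need_reset logic above boils down to "did the
 * sideband filter state flip?".  Compact restatement (hypothetical
 * helper, not driver code):
 */
#if 0
static bool i40e_ntuple_flips_state(struct i40e_pf *pf,
				    netdev_features_t features)
{
	bool was_on = !!(pf->flags & I40E_FLAG_FD_SB_ENABLED);
	bool want_on = !!(features & NETIF_F_NTUPLE);

	return was_on != want_on;	/* reset only on a state change */
}
#endif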
11419 * i40e_clear_rss_lut - clear the rx hash lookup table
11420 * @vsi: the VSI being configured
11422 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11423 {
11424 struct i40e_pf *pf = vsi->back;
11425 struct i40e_hw *hw = &pf->hw;
11426 u16 vf_id = vsi->vf_id;
11427 u8 i;
11429 if (vsi->type == I40E_VSI_MAIN) {
11430 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11431 wr32(hw, I40E_PFQF_HLUT(i), 0);
11432 } else if (vsi->type == I40E_VSI_SRIOV) {
11433 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11434 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11435 } else {
11436 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11437 }
11438 }
11441 * i40e_set_features - set the netdev feature flags
11442 * @netdev: ptr to the netdev being adjusted
11443 * @features: the feature set that the stack is suggesting
11444 * Note: expects to be called while under rtnl_lock()
11446 static int i40e_set_features(struct net_device *netdev,
11447 netdev_features_t features)
11448 {
11449 struct i40e_netdev_priv *np = netdev_priv(netdev);
11450 struct i40e_vsi *vsi = np->vsi;
11451 struct i40e_pf *pf = vsi->back;
11452 bool need_reset;
11454 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11455 i40e_pf_config_rss(pf);
11456 else if (!(features & NETIF_F_RXHASH) &&
11457 netdev->features & NETIF_F_RXHASH)
11458 i40e_clear_rss_lut(vsi);
11460 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11461 i40e_vlan_stripping_enable(vsi);
11462 else
11463 i40e_vlan_stripping_disable(vsi);
11465 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11466 dev_err(&pf->pdev->dev,
11467 "Offloaded tc filters active, can't turn hw_tc_offload off");
11468 return -EINVAL;
11469 }
11471 need_reset = i40e_set_ntuple(pf, features);
11473 if (need_reset)
11474 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11476 return 0;
11477 }
11480 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
11481 * @pf: board private structure
11482 * @port: The UDP port to look up
11484 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11486 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11487 {
11488 u8 i;
11490 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11491 /* Do not report ports with pending deletions as
11492 * being available.
11493 */
11494 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11495 continue;
11496 if (pf->udp_ports[i].port == port)
11497 return i;
11498 }
11500 return i;
11501 }
11504 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11505 * @netdev: This physical port's netdev
11506 * @ti: Tunnel endpoint information
11508 static void i40e_udp_tunnel_add(struct net_device *netdev,
11509 struct udp_tunnel_info *ti)
11510 {
11511 struct i40e_netdev_priv *np = netdev_priv(netdev);
11512 struct i40e_vsi *vsi = np->vsi;
11513 struct i40e_pf *pf = vsi->back;
11514 u16 port = ntohs(ti->port);
11515 u8 next_idx;
11516 u8 idx;
11518 idx = i40e_get_udp_port_idx(pf, port);
11520 /* Check if port already exists */
11521 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11522 netdev_info(netdev, "port %d already offloaded\n", port);
11523 return;
11524 }
11526 /* Now check if there is space to add the new port */
11527 next_idx = i40e_get_udp_port_idx(pf, 0);
11529 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11530 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11531 port);
11532 return;
11533 }
11535 switch (ti->type) {
11536 case UDP_TUNNEL_TYPE_VXLAN:
11537 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11538 break;
11539 case UDP_TUNNEL_TYPE_GENEVE:
11540 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11541 return;
11542 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11543 break;
11544 default:
11545 return;
11546 }
11548 /* New port: add it and mark its index in the bitmap */
11549 pf->udp_ports[next_idx].port = port;
11550 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11551 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11552 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11553 }
11556 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11557 * @netdev: This physical port's netdev
11558 * @ti: Tunnel endpoint information
11560 static void i40e_udp_tunnel_del(struct net_device *netdev,
11561 struct udp_tunnel_info *ti)
11562 {
11563 struct i40e_netdev_priv *np = netdev_priv(netdev);
11564 struct i40e_vsi *vsi = np->vsi;
11565 struct i40e_pf *pf = vsi->back;
11566 u16 port = ntohs(ti->port);
11567 u8 idx;
11569 idx = i40e_get_udp_port_idx(pf, port);
11571 /* Check if port already exists */
11572 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11573 goto not_found;
11575 switch (ti->type) {
11576 case UDP_TUNNEL_TYPE_VXLAN:
11577 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11578 goto not_found;
11579 break;
11580 case UDP_TUNNEL_TYPE_GENEVE:
11581 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11582 goto not_found;
11583 break;
11584 default:
11585 goto not_found;
11586 }
11588 /* if port exists, set it to 0 (mark for deletion)
11589 * and make it pending
11590 */
11591 pf->udp_ports[idx].port = 0;
11593 /* Toggle pending bit instead of setting it. This way if we are
11594 * deleting a port that has yet to be added we just clear the pending
11595 * bit and don't have to worry about it.
11596 */
11597 pf->pending_udp_bitmap ^= BIT_ULL(idx);
11598 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11600 return;
11601 not_found:
11602 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11603 port);
11604 }
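/* Editor's note: bit trace for the XOR toggle above, assuming index 3
 * (illustrative only, hypothetical helper).  Add sets the pending bit
 * with |=; deleting the same port before the sync task runs toggles it
 * back off with ^=, so firmware never sees the short-lived port at all.
 */
#if 0
static void i40e_pending_bitmap_example(struct i40e_pf *pf)
{
	pf->pending_udp_bitmap |= BIT_ULL(3);	/* add: pending */
	pf->pending_udp_bitmap ^= BIT_ULL(3);	/* del: cancelled out */
	WARN_ON(pf->pending_udp_bitmap & BIT_ULL(3));
}
#endif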
11606 static int i40e_get_phys_port_id(struct net_device *netdev,
11607 struct netdev_phys_item_id *ppid)
11608 {
11609 struct i40e_netdev_priv *np = netdev_priv(netdev);
11610 struct i40e_pf *pf = np->vsi->back;
11611 struct i40e_hw *hw = &pf->hw;
11613 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11614 return -EOPNOTSUPP;
11616 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11617 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11619 return 0;
11620 }
11623 * i40e_ndo_fdb_add - add an entry to the hardware database
11624 * @ndm: the input from the stack
11625 * @tb: pointer to array of nladdr (unused)
11626 * @dev: the net device pointer
11627 * @addr: the MAC address entry being added
11628 * @vid: VLAN ID
11629 * @flags: instructions from stack about fdb operation
11631 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11632 struct net_device *dev,
11633 const unsigned char *addr, u16 vid,
11634 u16 flags,
11635 struct netlink_ext_ack *extack)
11636 {
11637 struct i40e_netdev_priv *np = netdev_priv(dev);
11638 struct i40e_pf *pf = np->vsi->back;
11639 int err = 0;
11641 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11642 return -EOPNOTSUPP;
11644 if (vid) {
11645 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11646 return -EINVAL;
11647 }
11649 /* Hardware does not support aging addresses so if a
11650 * ndm_state is given only allow permanent addresses
11651 */
11652 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11653 netdev_info(dev, "FDB only supports static addresses\n");
11654 return -EINVAL;
11655 }
11657 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11658 err = dev_uc_add_excl(dev, addr);
11659 else if (is_multicast_ether_addr(addr))
11660 err = dev_mc_add_excl(dev, addr);
11661 else
11662 err = -EINVAL;
11664 /* Only return duplicate errors if NLM_F_EXCL is set */
11665 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11666 err = 0;
11668 return err;
11669 }
11672 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11673 * @dev: the netdev being configured
11674 * @nlh: RTNL message
11675 * @flags: bridge flags
11676 * @extack: netlink extended ack
11678 * Inserts a new hardware bridge if not already created and
11679 * enables the bridging mode requested (VEB or VEPA). If the
11680 * hardware bridge has already been inserted and the request
11681 * is to change the mode then that requires a PF reset to
11682 * allow rebuild of the components with required hardware
11683 * bridge mode enabled.
11685 * Note: expects to be called while under rtnl_lock()
11687 static int i40e_ndo_bridge_setlink(struct net_device *dev,
11688 struct nlmsghdr *nlh,
11689 u16 flags,
11690 struct netlink_ext_ack *extack)
11691 {
11692 struct i40e_netdev_priv *np = netdev_priv(dev);
11693 struct i40e_vsi *vsi = np->vsi;
11694 struct i40e_pf *pf = vsi->back;
11695 struct i40e_veb *veb = NULL;
11696 struct nlattr *attr, *br_spec;
11697 int i, rem;
11699 /* Only for PF VSI for now */
11700 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11701 return -EOPNOTSUPP;
11703 /* Find the HW bridge for PF VSI */
11704 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11705 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11706 veb = pf->veb[i];
11707 }
11709 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11711 nla_for_each_nested(attr, br_spec, rem) {
11712 __u16 mode;
11714 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11715 continue;
11717 mode = nla_get_u16(attr);
11718 if ((mode != BRIDGE_MODE_VEPA) &&
11719 (mode != BRIDGE_MODE_VEB))
11720 return -EINVAL;
11722 /* Insert a new HW bridge */
11723 if (!veb) {
11724 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11725 vsi->tc_config.enabled_tc);
11726 if (veb) {
11727 veb->bridge_mode = mode;
11728 i40e_config_bridge_mode(veb);
11729 } else {
11730 /* No Bridge HW offload available */
11731 return -ENOENT;
11732 }
11733 break;
11734 } else if (mode != veb->bridge_mode) {
11735 /* Existing HW bridge but different mode needs reset */
11736 veb->bridge_mode = mode;
11737 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11738 if (mode == BRIDGE_MODE_VEB)
11739 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11740 else
11741 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11742 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11743 break;
11744 }
11745 }
11747 return 0;
11748 }
11751 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11752 * @skb: skb buff
11753 * @pid: process id
11754 * @seq: RTNL message seq #
11755 * @dev: the netdev being configured
11756 * @filter_mask: unused
11757 * @nlflags: netlink flags passed in
11759 * Return the mode in which the hardware bridge is operating,
11760 * i.e. VEB or VEPA.
11762 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11763 struct net_device *dev,
11764 u32 __always_unused filter_mask,
11765 int nlflags)
11766 {
11767 struct i40e_netdev_priv *np = netdev_priv(dev);
11768 struct i40e_vsi *vsi = np->vsi;
11769 struct i40e_pf *pf = vsi->back;
11770 struct i40e_veb *veb = NULL;
11771 int i;
11773 /* Only for PF VSI for now */
11774 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11775 return -EOPNOTSUPP;
11777 /* Find the HW bridge for the PF VSI */
11778 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11779 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11780 veb = pf->veb[i];
11781 }
11783 if (!veb)
11784 return 0;
11786 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11787 0, 0, nlflags, filter_mask, NULL);
11788 }
11791 * i40e_features_check - Validate encapsulated packet conforms to limits
11792 * @skb: skb buff
11793 * @dev: This physical port's netdev
11794 * @features: Offload features that the stack believes apply
11796 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11797 struct net_device *dev,
11798 netdev_features_t features)
11799 {
11800 size_t len;
11802 /* No point in doing any of this if neither checksum nor GSO are
11803 * being requested for this frame. We can rule out both by just
11804 * checking for CHECKSUM_PARTIAL
11805 */
11806 if (skb->ip_summed != CHECKSUM_PARTIAL)
11807 return features;
11809 /* We cannot support GSO if the MSS is going to be less than
11810 * 64 bytes. If it is then we need to drop support for GSO.
11811 */
11812 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11813 features &= ~NETIF_F_GSO_MASK;
11815 /* MACLEN can support at most 63 words */
11816 len = skb_network_header(skb) - skb->data;
11817 if (len & ~(63 * 2))
11818 goto out_err;
11820 /* IPLEN and EIPLEN can support at most 127 dwords */
11821 len = skb_transport_header(skb) - skb_network_header(skb);
11822 if (len & ~(127 * 4))
11823 goto out_err;
11825 if (skb->encapsulation) {
11826 /* L4TUNLEN can support 127 words */
11827 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11828 if (len & ~(127 * 2))
11829 goto out_err;
11831 /* IPLEN can support at most 127 dwords */
11832 len = skb_inner_transport_header(skb) -
11833 skb_inner_network_header(skb);
11834 if (len & ~(127 * 4))
11835 goto out_err;
11836 }
11838 /* No need to validate L4LEN as TCP is the only protocol with a
11839 * flexible value and we support all possible values supported
11840 * by TCP, which is at most 15 dwords
11841 */
11843 return features;
11844 out_err:
11845 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11846 }
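/* Editor's note: the "len & ~(63 * 2)" style checks above are cheap
 * range tests: the expression is nonzero iff len is odd or larger than
 * 126 bytes (63 two-byte words), i.e. iff the header cannot be encoded
 * in the descriptor's MACLEN field.  Worked numbers (illustrative only,
 * hypothetical helper):
 */
#if 0
static bool i40e_maclen_fits_example(void)
{
	size_t len = ETH_HLEN;			/* 14-byte MAC header */

	return !(len & ~(63 * 2));		/* true: even and <= 126 */
}
#endif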
11849 * i40e_xdp_setup - add/remove an XDP program
11850 * @vsi: VSI to change
11851 * @prog: XDP program
11853 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11854 struct bpf_prog *prog)
11855 {
11856 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11857 struct i40e_pf *pf = vsi->back;
11858 struct bpf_prog *old_prog;
11859 bool need_reset;
11860 int i;
11862 /* Don't allow frames that span over multiple buffers */
11863 if (frame_size > vsi->rx_buf_len)
11864 return -EINVAL;
11866 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11867 return 0;
11869 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
11870 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
11872 if (need_reset)
11873 i40e_prep_for_reset(pf, true);
11875 old_prog = xchg(&vsi->xdp_prog, prog);
11877 if (need_reset)
11878 i40e_reset_and_rebuild(pf, true, true);
11880 for (i = 0; i < vsi->num_queue_pairs; i++)
11881 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
11883 if (old_prog)
11884 bpf_prog_put(old_prog);
11886 /* Kick start the NAPI context if there is an AF_XDP socket open
11887 * on that queue id. This so that receiving will start.
11888 */
11889 if (need_reset && prog)
11890 for (i = 0; i < vsi->num_queue_pairs; i++)
11891 if (vsi->xdp_rings[i]->xsk_umem)
11892 (void)i40e_xsk_async_xmit(vsi->netdev, i);
11894 return 0;
11895 }
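/* Editor's note: worked example for the single-buffer check above with a
 * standard 1500-byte MTU: 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4
 * (VLAN_HLEN) = 1522 bytes, which must fit in vsi->rx_buf_len for XDP to
 * be accepted.  Illustrative only, hypothetical helper:
 */
#if 0
static bool i40e_xdp_frame_fits_example(struct i40e_vsi *vsi)
{
	int frame_size = 1500 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	return frame_size <= vsi->rx_buf_len;	/* 1522 <= rx_buf_len? */
}
#endif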
11898 * i40e_enter_busy_conf - Enters busy config state
11899 * @vsi: vsi
11901 * Returns 0 on success, <0 for failure.
11903 static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
11904 {
11905 struct i40e_pf *pf = vsi->back;
11906 int timeout = 50;
11908 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
11909 timeout--;
11910 if (!timeout)
11911 return -EBUSY;
11912 usleep_range(1000, 2000);
11913 }
11915 return 0;
11916 }
11919 * i40e_exit_busy_conf - Exits busy config state
11920 * @vsi: vsi
11922 static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
11923 {
11924 struct i40e_pf *pf = vsi->back;
11926 clear_bit(__I40E_CONFIG_BUSY, pf->state);
11927 }
11930 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
11931 * @vsi: vsi
11932 * @queue_pair: queue pair
11934 static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
11935 {
11936 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
11937 sizeof(vsi->rx_rings[queue_pair]->rx_stats));
11938 memset(&vsi->tx_rings[queue_pair]->stats, 0,
11939 sizeof(vsi->tx_rings[queue_pair]->stats));
11940 if (i40e_enabled_xdp_vsi(vsi)) {
11941 memset(&vsi->xdp_rings[queue_pair]->stats, 0,
11942 sizeof(vsi->xdp_rings[queue_pair]->stats));
11943 }
11944 }
11947 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
11948 * @vsi: vsi
11949 * @queue_pair: queue pair
11951 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
11952 {
11953 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
11954 if (i40e_enabled_xdp_vsi(vsi)) {
11955 /* Make sure that in-progress ndo_xdp_xmit calls are
11956 * completed.
11957 */
11958 synchronize_rcu();
11959 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
11960 }
11961 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
11962 }
11965 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
11966 * @vsi: vsi
11967 * @queue_pair: queue pair
11968 * @enable: true for enable, false for disable
11970 static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
11971 bool enable)
11972 {
11973 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
11974 struct i40e_q_vector *q_vector = rxr->q_vector;
11976 if (!vsi->netdev)
11977 return;
11979 /* All rings in a qp belong to the same qvector. */
11980 if (q_vector->rx.ring || q_vector->tx.ring) {
11981 if (enable)
11982 napi_enable(&q_vector->napi);
11983 else
11984 napi_disable(&q_vector->napi);
11985 }
11986 }
11989 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
11990 * @vsi: vsi
11991 * @queue_pair: queue pair
11992 * @enable: true for enable, false for disable
11994 * Returns 0 on success, <0 on failure.
11996 static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
11997 bool enable)
11998 {
11999 struct i40e_pf *pf = vsi->back;
12000 int pf_q, ret = 0;
12002 pf_q = vsi->base_queue + queue_pair;
12003 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
12004 false /*is xdp*/, enable);
12005 if (ret) {
12006 dev_info(&pf->pdev->dev,
12007 "VSI seid %d Tx ring %d %sable timeout\n",
12008 vsi->seid, pf_q, (enable ? "en" : "dis"));
12009 return ret;
12010 }
12012 i40e_control_rx_q(pf, pf_q, enable);
12013 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
12014 if (ret) {
12015 dev_info(&pf->pdev->dev,
12016 "VSI seid %d Rx ring %d %sable timeout\n",
12017 vsi->seid, pf_q, (enable ? "en" : "dis"));
12018 return ret;
12019 }
12021 /* Due to HW errata, on Rx disable only, the register can
12022 * indicate done before it really is. Needs 50ms to be sure
12023 */
12024 if (!enable)
12025 mdelay(50);
12027 if (!i40e_enabled_xdp_vsi(vsi))
12028 return ret;
12030 ret = i40e_control_wait_tx_q(vsi->seid, pf,
12031 pf_q + vsi->alloc_queue_pairs,
12032 true /*is xdp*/, enable);
12033 if (ret) {
12034 dev_info(&pf->pdev->dev,
12035 "VSI seid %d XDP Tx ring %d %sable timeout\n",
12036 vsi->seid, pf_q, (enable ? "en" : "dis"));
12037 }
12039 return ret;
12040 }
12043 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
12044 * @vsi: vsi
12045 * @queue_pair: queue_pair
12047 static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
12048 {
12049 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12050 struct i40e_pf *pf = vsi->back;
12051 struct i40e_hw *hw = &pf->hw;
12053 /* All rings in a qp belong to the same qvector. */
12054 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
12055 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
12056 else
12057 i40e_irq_dynamic_enable_icr0(pf);
12059 i40e_flush(hw);
12060 }
12063 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
12064 * @vsi: vsi
12065 * @queue_pair: queue_pair
12067 static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
12068 {
12069 struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
12070 struct i40e_pf *pf = vsi->back;
12071 struct i40e_hw *hw = &pf->hw;
12073 /* For simplicity, instead of removing the qp interrupt causes
12074 * from the interrupt linked list, we simply disable the interrupt, and
12075 * leave the list intact.
12076 *
12077 * All rings in a qp belong to the same qvector.
12078 */
12079 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
12080 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;
12082 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
12083 i40e_flush(hw);
12084 synchronize_irq(pf->msix_entries[intpf].vector);
12085 } else {
12086 /* Legacy and MSI mode - this stops all interrupt handling */
12087 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
12088 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
12089 i40e_flush(hw);
12090 synchronize_irq(pf->pdev->irq);
12091 }
12092 }
12095 * i40e_queue_pair_disable - Disables a queue pair
12096 * @vsi: vsi
12097 * @queue_pair: queue pair
12099 * Returns 0 on success, <0 on failure.
12101 int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
12102 {
12103 int err;
12105 err = i40e_enter_busy_conf(vsi);
12106 if (err)
12107 return err;
12109 i40e_queue_pair_disable_irq(vsi, queue_pair);
12110 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
12111 i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
12112 i40e_queue_pair_clean_rings(vsi, queue_pair);
12113 i40e_queue_pair_reset_stats(vsi, queue_pair);
12115 return err;
12116 }
12119 * i40e_queue_pair_enable - Enables a queue pair
12120 * @vsi: vsi
12121 * @queue_pair: queue pair
12123 * Returns 0 on success, <0 on failure.
12125 int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
12126 {
12127 int err;
12129 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
12130 if (err)
12131 return err;
12133 if (i40e_enabled_xdp_vsi(vsi)) {
12134 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
12135 if (err)
12136 return err;
12137 }
12139 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
12140 if (err)
12141 return err;
12143 err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
12144 i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
12145 i40e_queue_pair_enable_irq(vsi, queue_pair);
12147 i40e_exit_busy_conf(vsi);
12149 return err;
12150 }
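/* Editor's note: sketch of the calling pattern the pair of helpers above
 * exists for (hypothetical function, not driver code): the AF_XDP path
 * quiesces one queue pair, swaps its resources, and brings it back while
 * the rest of the VSI keeps running.
 */
#if 0
static int i40e_requeue_example(struct i40e_vsi *vsi, int qp)
{
	int err = i40e_queue_pair_disable(vsi, qp);

	if (err)
		return err;
	/* ...swap the UMEM / reprogram the rings for queue 'qp' here... */
	return i40e_queue_pair_enable(vsi, qp);
}
#endif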
12153 * i40e_xdp - implements ndo_bpf for i40e
12154 * @dev: netdevice
12155 * @xdp: XDP command
12157 static int i40e_xdp(struct net_device *dev,
12158 struct netdev_bpf *xdp)
12159 {
12160 struct i40e_netdev_priv *np = netdev_priv(dev);
12161 struct i40e_vsi *vsi = np->vsi;
12163 if (vsi->type != I40E_VSI_MAIN)
12164 return -EINVAL;
12166 switch (xdp->command) {
12167 case XDP_SETUP_PROG:
12168 return i40e_xdp_setup(vsi, xdp->prog);
12169 case XDP_QUERY_PROG:
12170 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
12171 return 0;
12172 case XDP_SETUP_XSK_UMEM:
12173 return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
12174 xdp->xsk.queue_id);
12175 default:
12176 return -EINVAL;
12177 }
12178 }
12180 static const struct net_device_ops i40e_netdev_ops = {
12181 .ndo_open = i40e_open,
12182 .ndo_stop = i40e_close,
12183 .ndo_start_xmit = i40e_lan_xmit_frame,
12184 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
12185 .ndo_set_rx_mode = i40e_set_rx_mode,
12186 .ndo_validate_addr = eth_validate_addr,
12187 .ndo_set_mac_address = i40e_set_mac,
12188 .ndo_change_mtu = i40e_change_mtu,
12189 .ndo_do_ioctl = i40e_ioctl,
12190 .ndo_tx_timeout = i40e_tx_timeout,
12191 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
12192 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
12193 #ifdef CONFIG_NET_POLL_CONTROLLER
12194 .ndo_poll_controller = i40e_netpoll,
12195 #endif
12196 .ndo_setup_tc = __i40e_setup_tc,
12197 .ndo_set_features = i40e_set_features,
12198 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
12199 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
12200 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
12201 .ndo_get_vf_config = i40e_ndo_get_vf_config,
12202 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
12203 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
12204 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
12205 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
12206 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
12207 .ndo_get_phys_port_id = i40e_get_phys_port_id,
12208 .ndo_fdb_add = i40e_ndo_fdb_add,
12209 .ndo_features_check = i40e_features_check,
12210 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
12211 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
12212 .ndo_bpf = i40e_xdp,
12213 .ndo_xdp_xmit = i40e_xdp_xmit,
12214 .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
12215 };
12218 * i40e_config_netdev - Setup the netdev flags
12219 * @vsi: the VSI being configured
12221 * Returns 0 on success, negative value on failure
12223 static int i40e_config_netdev(struct i40e_vsi *vsi)
12224 {
12225 struct i40e_pf *pf = vsi->back;
12226 struct i40e_hw *hw = &pf->hw;
12227 struct i40e_netdev_priv *np;
12228 struct net_device *netdev;
12229 u8 broadcast[ETH_ALEN];
12230 u8 mac_addr[ETH_ALEN];
12231 int etherdev_size;
12232 netdev_features_t hw_enc_features;
12233 netdev_features_t hw_features;
12235 etherdev_size = sizeof(struct i40e_netdev_priv);
12236 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12237 if (!netdev)
12238 return -ENOMEM;
12240 vsi->netdev = netdev;
12241 np = netdev_priv(netdev);
12242 np->vsi = vsi;
12244 hw_enc_features = NETIF_F_SG |
12245 NETIF_F_IP_CSUM |
12246 NETIF_F_IPV6_CSUM |
12247 NETIF_F_HIGHDMA |
12248 NETIF_F_SOFT_FEATURES |
12249 NETIF_F_TSO |
12250 NETIF_F_TSO_ECN |
12251 NETIF_F_TSO6 |
12252 NETIF_F_GSO_GRE |
12253 NETIF_F_GSO_GRE_CSUM |
12254 NETIF_F_GSO_PARTIAL |
12255 NETIF_F_GSO_IPXIP4 |
12256 NETIF_F_GSO_IPXIP6 |
12257 NETIF_F_GSO_UDP_TUNNEL |
12258 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12259 NETIF_F_SCTP_CRC |
12260 NETIF_F_RXHASH |
12261 NETIF_F_RXCSUM |
12262 0;
12264 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12265 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12267 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12269 netdev->hw_enc_features |= hw_enc_features;
12271 /* record features VLANs can make use of */
12272 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12274 hw_features = hw_enc_features |
12275 NETIF_F_HW_VLAN_CTAG_TX |
12276 NETIF_F_HW_VLAN_CTAG_RX;
12278 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12279 hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12281 netdev->hw_features |= hw_features;
12283 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12284 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12286 if (vsi->type == I40E_VSI_MAIN) {
12287 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12288 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12289 /* The following steps are necessary for two reasons. First,
12290 * some older NVM configurations load a default MAC-VLAN
12291 * filter that will accept any tagged packet, and we want to
12292 * replace this with a normal filter. Additionally, it is
12293 * possible our MAC address was provided by the platform using
12294 * Open Firmware or similar.
12296 * Thus, we need to remove the default filter and install one
12297 * specific to the MAC address.
12299 i40e_rm_default_mac_filter(vsi, mac_addr);
12300 spin_lock_bh(&vsi->mac_filter_hash_lock);
12301 i40e_add_mac_filter(vsi, mac_addr);
12302 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12303 } else {
12304 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
12305 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
12306 * the end, which is 4 bytes long, so force truncation of the
12307 * original name by IFNAMSIZ - 4
12308 */
12309 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12310 IFNAMSIZ - 4,
12311 pf->vsi[pf->lan_vsi]->netdev->name);
12312 eth_random_addr(mac_addr);
12314 spin_lock_bh(&vsi->mac_filter_hash_lock);
12315 i40e_add_mac_filter(vsi, mac_addr);
12316 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12317 }
12319 /* Add the broadcast filter so that we initially will receive
12320 * broadcast packets. Note that when a new VLAN is first added the
12321 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
12322 * specific filters as part of transitioning into "vlan" operation.
12323 * When more VLANs are added, the driver will copy each existing MAC
12324 * filter and add it for the new VLAN.
12326 * Broadcast filters are handled specially by
12327 * i40e_sync_filters_subtask, as the driver must set the broadcast
12328 * promiscuous bit instead of adding this directly as a MAC/VLAN
12329 * filter. The subtask will update the correct broadcast promiscuous
12330 * bits as VLANs become active or inactive.
12332 eth_broadcast_addr(broadcast);
12333 spin_lock_bh(&vsi->mac_filter_hash_lock);
12334 i40e_add_mac_filter(vsi, broadcast);
12335 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12337 ether_addr_copy(netdev->dev_addr, mac_addr);
12338 ether_addr_copy(netdev->perm_addr, mac_addr);
12340 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
12341 netdev->neigh_priv_len = sizeof(u32) * 4;
12343 netdev->priv_flags |= IFF_UNICAST_FLT;
12344 netdev->priv_flags |= IFF_SUPP_NOFCS;
12345 /* Setup netdev TC information */
12346 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12348 netdev->netdev_ops = &i40e_netdev_ops;
12349 netdev->watchdog_timeo = 5 * HZ;
12350 i40e_set_ethtool_ops(netdev);
12352 /* MTU range: 68 - 9706 */
12353 netdev->min_mtu = ETH_MIN_MTU;
12354 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
12356 return 0;
12357 }
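/* Editor's note: the 9706 in the MTU-range comment above is derived,
 * assuming I40E_MAX_RXBUFFER is 9728 and I40E_PACKET_HDR_PAD is 22 bytes
 * (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN): 9728 - 22 = 9706.  Illustrative
 * compile-time check only, not driver code:
 */
#if 0
static void i40e_max_mtu_example(void)
{
	BUILD_BUG_ON(I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD != 9706);
}
#endif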
12360 * i40e_vsi_delete - Delete a VSI from the switch
12361 * @vsi: the VSI being removed
12365 static void i40e_vsi_delete(struct i40e_vsi *vsi)
12366 {
12367 /* remove default VSI is not allowed */
12368 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12369 return;
12371 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
12372 }
12375 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
12376 * @vsi: the VSI being queried
12378 * Returns 1 if HW bridge mode is VEB, 0 in case of VEPA mode
12380 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12381 {
12382 struct i40e_veb *veb;
12383 struct i40e_pf *pf = vsi->back;
12385 /* Uplink is not a bridge so default to VEB */
12386 if (vsi->veb_idx == I40E_NO_VEB)
12387 return 1;
12389 veb = pf->veb[vsi->veb_idx];
12390 if (!veb) {
12391 dev_info(&pf->pdev->dev,
12392 "There is no veb associated with the bridge\n");
12393 return -ENOENT;
12394 }
12396 /* Uplink is a bridge in VEPA mode */
12397 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
12398 return 0;
12399 } else {
12400 /* Uplink is a bridge in VEB mode */
12401 return 1;
12402 }
12404 /* VEPA is now default bridge, so return 0 */
12405 return 0;
12406 }
12409 * i40e_add_vsi - Add a VSI to the switch
12410 * @vsi: the VSI being configured
12412 * This initializes a VSI context depending on the VSI type to be added and
12413 * passes it down to the add_vsi aq command.
12415 static int i40e_add_vsi(struct i40e_vsi *vsi)
12418 struct i40e_pf *pf = vsi->back;
12419 struct i40e_hw *hw = &pf->hw;
12420 struct i40e_vsi_context ctxt;
12421 struct i40e_mac_filter *f;
12422 struct hlist_node *h;
12425 u8 enabled_tc = 0x1; /* TC0 enabled */
12428 memset(&ctxt, 0, sizeof(ctxt));
12429 switch (vsi->type) {
12430 case I40E_VSI_MAIN:
12431 /* The PF's main VSI is already setup as part of the
12432 * device initialization, so we'll not bother with
12433 * the add_vsi call, but we will retrieve the current
12436 ctxt.seid = pf->main_vsi_seid;
12437 ctxt.pf_num = pf->hw.pf_id;
12439 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
12440 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12442 dev_info(&pf->pdev->dev,
12443 "couldn't get PF vsi config, err %s aq_err %s\n",
12444 i40e_stat_str(&pf->hw, ret),
12445 i40e_aq_str(&pf->hw,
12446 pf->hw.aq.asq_last_status));
12449 vsi->info = ctxt.info;
12450 vsi->info.valid_sections = 0;
12452 vsi->seid = ctxt.seid;
12453 vsi->id = ctxt.vsi_number;
12455 enabled_tc = i40e_pf_get_tc_map(pf);
12457 /* Source pruning is enabled by default, so the flag is
12458 * negative logic - if it's set, we need to fiddle with
12459 * the VSI to disable source pruning.
12461 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
12462 memset(&ctxt, 0, sizeof(ctxt));
12463 ctxt.seid = pf->main_vsi_seid;
12464 ctxt.pf_num = pf->hw.pf_id;
12466 ctxt.info.valid_sections |=
12467 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12468 ctxt.info.switch_id =
12469 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
12470 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12472 dev_info(&pf->pdev->dev,
12473 "update vsi failed, err %s aq_err %s\n",
12474 i40e_stat_str(&pf->hw, ret),
12475 i40e_aq_str(&pf->hw,
12476 pf->hw.aq.asq_last_status));
12482 /* MFP mode setup queue map and update VSI */
12483 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
12484 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
12485 memset(&ctxt, 0, sizeof(ctxt));
12486 ctxt.seid = pf->main_vsi_seid;
12487 ctxt.pf_num = pf->hw.pf_id;
12489 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
12490 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12492 dev_info(&pf->pdev->dev,
12493 "update vsi failed, err %s aq_err %s\n",
12494 i40e_stat_str(&pf->hw, ret),
12495 i40e_aq_str(&pf->hw,
12496 pf->hw.aq.asq_last_status));
12500 /* update the local VSI info queue map */
12501 i40e_vsi_update_queue_map(vsi, &ctxt);
12502 vsi->info.valid_sections = 0;
12504 /* Default/Main VSI is only enabled for TC0
12505 * reconfigure it to enable all TCs that are
12506 * available on the port in SFP mode.
12507 * For MFP case the iSCSI PF would use this
12508 * flow to enable LAN+iSCSI TC.
12510 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12512 /* Single TC condition is not fatal,
12513 * message and continue
12515 dev_info(&pf->pdev->dev,
12516 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12518 i40e_stat_str(&pf->hw, ret),
12519 i40e_aq_str(&pf->hw,
12520 pf->hw.aq.asq_last_status));
12525 case I40E_VSI_FDIR:
12526 ctxt.pf_num = hw->pf_id;
12528 ctxt.uplink_seid = vsi->uplink_seid;
12529 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12530 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12531 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12532 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12533 ctxt.info.valid_sections |=
12534 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12535 ctxt.info.switch_id =
12536 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12538 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12541 case I40E_VSI_VMDQ2:
12542 ctxt.pf_num = hw->pf_id;
12544 ctxt.uplink_seid = vsi->uplink_seid;
12545 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12546 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12548 /* This VSI is connected to VEB so the switch_id
12549 * should be set to zero by default.
12551 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12552 ctxt.info.valid_sections |=
12553 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12554 ctxt.info.switch_id =
12555 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12558 /* Setup the VSI tx/rx queue map for TC0 only for now */
12559 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12562 case I40E_VSI_SRIOV:
12563 ctxt.pf_num = hw->pf_id;
12564 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12565 ctxt.uplink_seid = vsi->uplink_seid;
12566 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12567 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12569 /* This VSI is connected to VEB so the switch_id
12570 * should be set to zero by default.
12572 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12573 ctxt.info.valid_sections |=
12574 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12575 ctxt.info.switch_id =
12576 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12579 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12580 ctxt.info.valid_sections |=
12581 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12582 ctxt.info.queueing_opt_flags |=
12583 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12584 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12587 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12588 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12589 if (pf->vf[vsi->vf_id].spoofchk) {
12590 ctxt.info.valid_sections |=
12591 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12592 ctxt.info.sec_flags |=
12593 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12594 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12596 /* Setup the VSI tx/rx queue map for TC0 only for now */
12597 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12600 case I40E_VSI_IWARP:
12601 /* send down message to iWARP */
12608 if (vsi->type != I40E_VSI_MAIN) {
12609 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12611 dev_info(&vsi->back->pdev->dev,
12612 "add vsi failed, err %s aq_err %s\n",
12613 i40e_stat_str(&pf->hw, ret),
12614 i40e_aq_str(&pf->hw,
12615 pf->hw.aq.asq_last_status));
12619 vsi->info = ctxt.info;
12620 vsi->info.valid_sections = 0;
12621 vsi->seid = ctxt.seid;
12622 vsi->id = ctxt.vsi_number;
12625 vsi->active_filters = 0;
12626 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12627 spin_lock_bh(&vsi->mac_filter_hash_lock);
12628 /* If macvlan filters already exist, force them to get loaded */
12629 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12630 f->state = I40E_FILTER_NEW;
12633 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12636 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12637 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
12640 /* Update VSI BW information */
12641 ret = i40e_vsi_get_bw_info(vsi);
12643 dev_info(&pf->pdev->dev,
12644 "couldn't get vsi bw info, err %s aq_err %s\n",
12645 i40e_stat_str(&pf->hw, ret),
12646 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12647 /* VSI is already added so not tearing that up */
12656 * i40e_vsi_release - Delete a VSI and free its resources
12657 * @vsi: the VSI being removed
12659 * Returns 0 on success or < 0 on error
12661 int i40e_vsi_release(struct i40e_vsi *vsi)
12662 {
12663 struct i40e_mac_filter *f;
12664 struct hlist_node *h;
12665 struct i40e_veb *veb = NULL;
12666 struct i40e_pf *pf;
12667 u16 uplink_seid;
12668 int i, n, bkt;
12670 pf = vsi->back;
12672 /* release of a VEB-owner or last VSI is not allowed */
12673 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12674 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12675 vsi->seid, vsi->uplink_seid);
12676 return -ENODEV;
12677 }
12678 if (vsi == pf->vsi[pf->lan_vsi] &&
12679 !test_bit(__I40E_DOWN, pf->state)) {
12680 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12681 return -ENODEV;
12682 }
12684 uplink_seid = vsi->uplink_seid;
12685 if (vsi->type != I40E_VSI_SRIOV) {
12686 if (vsi->netdev_registered) {
12687 vsi->netdev_registered = false;
12688 if (vsi->netdev) {
12689 /* results in a call to i40e_close() */
12690 unregister_netdev(vsi->netdev);
12691 }
12692 } else {
12693 i40e_vsi_close(vsi);
12694 }
12695 i40e_vsi_disable_irq(vsi);
12696 }
12698 spin_lock_bh(&vsi->mac_filter_hash_lock);
12700 /* clear the sync flag on all filters */
12701 if (vsi->netdev) {
12702 __dev_uc_unsync(vsi->netdev, NULL);
12703 __dev_mc_unsync(vsi->netdev, NULL);
12704 }
12706 /* make sure any remaining filters are marked for deletion */
12707 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12708 __i40e_del_filter(vsi, f);
12710 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12712 i40e_sync_vsi_filters(vsi);
12714 i40e_vsi_delete(vsi);
12715 i40e_vsi_free_q_vectors(vsi);
12716 if (vsi->netdev) {
12717 free_netdev(vsi->netdev);
12718 vsi->netdev = NULL;
12719 }
12720 i40e_vsi_clear_rings(vsi);
12721 i40e_vsi_clear(vsi);
12723 /* If this was the last thing on the VEB, except for the
12724 * controlling VSI, remove the VEB, which puts the controlling
12725 * VSI onto the next level down in the switch.
12727 * Well, okay, there's one more exception here: don't remove
12728 * the orphan VEBs yet. We'll wait for an explicit remove request
12729 * from up the network stack.
12731 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12732 if (pf->vsi[i] &&
12733 pf->vsi[i]->uplink_seid == uplink_seid &&
12734 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12735 n++; /* count the VSIs */
12736 }
12737 }
12738 for (i = 0; i < I40E_MAX_VEB; i++) {
12739 if (!pf->veb[i])
12740 continue;
12741 if (pf->veb[i]->uplink_seid == uplink_seid)
12742 n++; /* count the VEBs */
12743 if (pf->veb[i]->seid == uplink_seid)
12744 veb = pf->veb[i];
12745 }
12746 if (n == 0 && veb && veb->uplink_seid != 0)
12747 i40e_veb_release(veb);
12749 return 0;
12750 }
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

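/* When an XDP program is attached, each queue pair needs a second Tx ring
 * for XDP transmits, which is why the reinit and setup paths below request
 * vsi->alloc_queue_pairs * 2 queue pairs from the qp_pile.
 */
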
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}

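/* A typical caller of i40e_vsi_setup() is i40e_fdir_sb_setup() elsewhere in
 * this file, which creates the sideband Flow Director VSI roughly like so
 * (illustrative sketch, assuming the main LAN VSI already exists):
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
 *			     pf->vsi[pf->lan_vsi]->seid, 0);
 */
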
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous case
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		/* Find the VSI that owns the VEB, or no VEB */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}

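/* In the BW query below, the per-TC max quanta come back packed four bits
 * per traffic class (three of them used) across two little-endian 16-bit
 * words; the code reassembles them into one u32 before extracting each
 * field with (tc_bw_max >> (i * 4)) & 0x7.
 */
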
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}

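/* pf->veb[] is a fixed array of I40E_MAX_VEB slots; the allocator below
 * scans for the first free slot under pf->switch_mutex, so creation and
 * destruction of switch elements are serialized at the PF level.
 */
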
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

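/* i40e_veb_clear() below tolerates a NULL veb and detaches the struct from
 * pf->veb[] under switch_mutex before freeing it, so it is safe to call
 * from both error paths and the normal release path.
 */
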
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

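/* i40e_vsi_setup() above shows the common way a VEB gets created, e.g.
 * (illustrative, mirroring the call made there):
 *
 *	veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *
 * Passing both uplink_seid and vsi_seid as zero instead creates a
 * floating VEB.
 */
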
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}

/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
	}
}

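/* The firmware returns the switch configuration in pages:
 * i40e_aq_get_switch_config() hands back a continuation cookie in
 * next_seid, and the loop below keeps fetching until that cookie
 * comes back as zero.
 */
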
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}

/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}

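/* A worked example with hypothetical capabilities: with 64 Tx queue pairs,
 * MSI-X enabled, RSS on, and Flow Director sideband on, the LAN VSI gets
 * min(num_tx_qp, num_msix_vectors, max(rss_size_max, online CPUs)) queues,
 * one queue is then reserved for FD, and whatever remains is split between
 * the requested VFs and the VMDq VSIs, in that order.
 */
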
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big chunk of queues, if they are available.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

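/* REMAIN() below keeps each snprintf() within the single 255-byte buffer;
 * since snprintf() returns the length that would have been written, the
 * trailing WARN_ON(i > INFO_STRING_LEN) only fires if the feature string
 * would have been truncated.
 */
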
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}

/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}

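/* Probe order matters below: PCI/DMA setup, then admin queue init (the
 * firmware handshake), then HMC configuration, and only then the software
 * switch setup and the misc/admin-queue interrupt vector. The error unwind
 * labels at the bottom of i40e_probe() mirror this order in reverse.
 */
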
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strlcpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strlcpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strlcpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strlcpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}

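/* The five callbacks below implement the standard PCI AER recovery flow:
 * error_detected() quiesces the device, slot_reset() checks that registers
 * are readable again after the link reset, and resume() rebuilds through
 * the same i40e_handle_reset_warning() path used for internally requested
 * resets.
 */
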
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}

/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}

/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf, true);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
	.driver   = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

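/* The workqueue is allocated before pci_register_driver() so that probe can
 * schedule service tasks immediately; i40e_exit_module() tears these down
 * in the reverse order for the same reason.
 */
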
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);