// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
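
/* With the default firmware search path, ICE_DDP_PKG_FILE resolves to
 * /lib/firmware/intel/ice/ddp/ice.pkg.
 */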
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));
bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}
/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
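
/* Example: with ring->count = 256, next_to_clean = 250 and next_to_use = 10,
 * the ring has wrapped, so ice_get_tx_pending() returns
 * 10 + 256 - 250 = 16 outstanding descriptors.
 */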
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
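
/* The detection above is two-pass: each service-task run records the current
 * packet count in prev_pkt (or -1 when the ring has no pending work), and a
 * queue is only revived when a later run sees the same count with
 * descriptors still outstanding.
 */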
/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}
/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}
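
/* These three *_FLTR_CHANGED bits are cleared again in ice_vsi_sync_fltr()
 * once the outstanding changes have been pushed to hardware.
 */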
/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1)
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
	else
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);

	return status;
}
/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 *
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1)
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
	else
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);

	return status;
}
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If filter is added successfully or already exists, do not go into
	 * 'if' condition and report it as error. Instead continue processing
	 * rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_set_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_clear_promisc(vsi, promisc_m);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vsi->vlan_ops.dis_rx_filtering(vsi);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					vsi->vlan_ops.ena_rx_filtering(vsi);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
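
/* ice_vsi_sync_fltr() is normally driven from the service task via
 * ice_sync_fltr_subtask() below; a nonzero return re-arms ICE_FLAG_FLTR_SYNC
 * so the sync is retried on a later pass.
 */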
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}
/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}
/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents this from happening and needs to be cleared beforehand.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}
/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	unsigned int i;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->rss_size = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}
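
/* For CORER/GLOBR/EMPR the rebuild is instead driven from
 * ice_reset_subtask() below, once the OICR interrupt has reported that the
 * hardware reset completed.
 */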
/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), poll for reset done, rebuild
	 * and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}
/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}
/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}
/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}
/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error in this function should be logged with a DBG message
 * and the driver should continue on with rebuild/re-enable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf = tlv->tlvinfo;
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}
/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}
/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}
/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (!ice_is_e810(&pf->hw))
		ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}
/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}
/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}
/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}
enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};
/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
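
/* Usage sketch (hypothetical caller; ice_aqc_opc_example stands in for a
 * real opcode): wait up to 3 seconds for a matching firmware response while
 * the service task drains the ARQ:
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_example, 3 * HZ, &event);
 *
 * The waiter is woken from ice_aq_check_events() when __ice_clean_ctrlq()
 * receives an event with the requested opcode.
 */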
/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
		task->event->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task->event->msg_buf &&
		    task->event->buf_len > event->buf_len) {
			memcpy(task->event->msg_buf, event->msg_buf,
			       event->buf_len);
			task->event->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}
/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}
/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 *
 * Returns non-zero when the work limit was hit with events still pending, so
 * the caller knows to leave its *_EVENT_PENDING bit set and come back.
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		u16 opcode;
		int ret;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == -EALREADY)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}
/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}
/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}
/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}
/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}
/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}
/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}
/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}
/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g. WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}
/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
	}

	/* check to see if this PF caused an MDD event */
	reg = rd32(hw, PF_MDET_TX_PQM);
	if (reg & PF_MDET_TX_PQM_VALID_M) {
		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_TX_TCLAN);
	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
	}

	reg = rd32(hw, PF_MDET_RX);
	if (reg & PF_MDET_RX_VALID_M) {
		wr32(hw, PF_MDET_RX, 0xFFFF);
		if (netif_msg_rx_err(pf))
			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
	}

	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
			 * private flag mdd-auto-reset-vf.
			 */
			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
				/* VF MDD event counters will be cleared by
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				ice_reset_vf(&pf->vf[i], false);
			}
		}
	}

	ice_print_vfs_mdd_events(pf);
}
/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = ice_pf_to_dev(vsi->back);

	pi = vsi->port_info;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	/* Use the current user PHY configuration. The current user PHY
	 * configuration is initialized during probe from PHY capabilities
	 * software mode, and updated on set PHY configuration.
	 */
	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	kfree(cfg);
out:
	kfree(pcaps);
	return retcode;
}
/**
 * ice_init_nvm_phy_type - Initialize the NVM PHY type
 * @pi: port info structure
 *
 * Initialize nvm_phy_type_[low|high] for link lenient mode support
 */
static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_pf *pf = pi->hw->back;
	int err;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
				  pcaps, NULL);

	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto out;
	}

	pf->nvm_phy_type_hi = pcaps->phy_type_high;
	pf->nvm_phy_type_lo = pcaps->phy_type_low;

out:
	kfree(pcaps);
	return err;
}
/**
 * ice_init_link_dflt_override - Initialize link default override
 * @pi: port info structure
 *
 * Initialize link default override and PHY total port shutdown during probe
 */
static void ice_init_link_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;
	if (ice_get_link_default_override(ldo, pi))
		return;

	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
		return;

	/* Enable Total Port Shutdown (override/replace link-down-on-close
	 * ethtool private flag) for ports with Port Disable bit set.
	 */
	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
}
/**
 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
 * @pi: port info structure
 *
 * If default override is enabled, initialize the user PHY cfg speed and FEC
 * settings using the default override mask from the NVM.
 *
 * The PHY should only be configured with the default override settings the
 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
 * is used to indicate that the user PHY cfg default override is initialized
 * and the PHY has not been configured with the default override settings. The
 * state is set here, and cleared in ice_configure_phy the first time the PHY is
 * configured.
 *
 * This function should be called only if the FW doesn't support default
 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
 */
static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv *ldo;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;

	ldo = &pf->link_dflt_override;

	/* If link default override is enabled, use to mask NVM PHY capabilities
	 * for speed and FEC default configuration.
	 */
	cfg = &phy->curr_user_phy_cfg;

	if (ldo->phy_type_low || ldo->phy_type_high) {
		cfg->phy_type_low = pf->nvm_phy_type_lo &
				    cpu_to_le64(ldo->phy_type_low);
		cfg->phy_type_high = pf->nvm_phy_type_hi &
				     cpu_to_le64(ldo->phy_type_high);
	}
	cfg->link_fec_opt = ldo->fec_options;
	phy->curr_user_fec_req = ICE_FEC_AUTO;

	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
}
/**
 * ice_init_phy_user_cfg - Initialize the PHY user configuration
 * @pi: port info structure
 *
 * Initialize the current user PHY configuration, speed, FEC, and FC requested
 * mode to default. The PHY defaults are from get PHY capabilities topology
 * with media so call when media is first available. An error is returned if
 * called when media is not available. The PHY initialization completed state is
 * set here.
 *
 * These configurations are used when setting PHY
 * configuration. The user PHY configuration is updated on set PHY
 * configuration. Returns 0 on success, negative on failure
 */
static int ice_init_phy_user_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy = &pi->phy;
	struct ice_pf *pf = pi->hw->back;
	int err;

	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		return -EIO;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	if (ice_fw_supports_report_dflt_cfg(pi->hw))
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
					  pcaps, NULL);
	else
		err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					  pcaps, NULL);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
		goto err_out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

	/* check if lenient mode is supported and enabled */
	if (ice_fw_supports_link_override(pi->hw) &&
	    !(pcaps->module_compliance_enforcement &
	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

		/* if the FW supports default PHY configuration mode, then the driver
		 * does not have to apply link override settings. If not,
		 * initialize user PHY configuration with link override values
		 */
		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
			ice_init_phy_cfg_dflt_override(pi);
			goto out;
		}
	}

	/* if link default override is not enabled, set user flow control and
	 * FEC settings based on what get_phy_caps returned
	 */
	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
						      pcaps->link_fec_options);
	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

out:
	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
err_out:
	kfree(pcaps);
	return err;
}
2060 * ice_configure_phy - configure PHY
2063 * Set the PHY configuration. If the current PHY configuration is the same as
2064 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2065 * configure the based get PHY capabilities for topology with media.
2067 static int ice_configure_phy(struct ice_vsi *vsi)
2069 struct device *dev = ice_pf_to_dev(vsi->back);
2070 struct ice_port_info *pi = vsi->port_info;
2071 struct ice_aqc_get_phy_caps_data *pcaps;
2072 struct ice_aqc_set_phy_cfg_data *cfg;
2073 struct ice_phy_info *phy = &pi->phy;
2074 struct ice_pf *pf = vsi->back;
2077 /* Ensure we have media as we cannot configure a medialess port */
2078 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2081 ice_print_topo_conflict(vsi);
2083 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2084 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2087 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2088 return ice_force_phys_link_state(vsi, true);
2090 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2094 /* Get current PHY config */
2095 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2098 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2103 /* If PHY enable link is configured and configuration has not changed,
2104 * there's nothing to do
2106 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2107 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2110 /* Use PHY topology as baseline for configuration */
2111 memset(pcaps, 0, sizeof(*pcaps));
2112 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2113 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2116 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2119 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2124 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2130 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2132 /* Speed - If default override pending, use curr_user_phy_cfg set in
2133 * ice_init_phy_cfg_dflt_override.
2135 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2136 vsi->back->state)) {
2137 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2138 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2140 u64 phy_low = 0, phy_high = 0;
2142 ice_update_phy_type(&phy_low, &phy_high,
2143 pi->phy.curr_user_speed_req);
2144 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2145 cfg->phy_type_high = pcaps->phy_type_high &
2146 cpu_to_le64(phy_high);
2149 /* Can't provide what was requested; use PHY capabilities */
2150 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2151 cfg->phy_type_low = pcaps->phy_type_low;
2152 cfg->phy_type_high = pcaps->phy_type_high;
2156 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2158 /* Can't provide what was requested; use PHY capabilities */
2159 if (cfg->link_fec_opt !=
2160 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2161 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2162 cfg->link_fec_opt = pcaps->link_fec_options;
2165 /* Flow Control - always supported; no need to check against capabilities */
2168 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2170 /* Enable link and link update */
2171 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2173 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2175 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2185 * ice_check_media_subtask - Check for media
2186 * @pf: pointer to PF struct
2188 * If media is available, then initialize the PHY user configuration if it
2189 * has not been done yet, and configure the PHY if the interface is up.
2191 static void ice_check_media_subtask(struct ice_pf *pf)
2193 struct ice_port_info *pi;
2194 struct ice_vsi *vsi;
2197 /* No need to check for media if it's already present */
2198 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2201 vsi = ice_get_main_vsi(pf);
2205 /* Refresh link info and check if media is present */
2206 pi = vsi->port_info;
2207 err = ice_update_link_info(pi);
2211 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2213 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2214 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2215 ice_init_phy_user_cfg(pi);
2217 /* PHY settings are reset on media insertion, reconfigure
2218 * PHY to preserve settings.
2220 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2221 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2224 err = ice_configure_phy(vsi);
2226 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2228 /* A Link Status Event will be generated; the event handler
2229 * will complete bringing the interface up
2235 * ice_service_task - manage and run subtasks
2236 * @work: pointer to work_struct contained by the PF struct
2238 static void ice_service_task(struct work_struct *work)
2240 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2241 unsigned long start_time = jiffies;
2245 /* process reset requests first */
2246 ice_reset_subtask(pf);
2248 /* bail if a reset/recovery cycle is pending or rebuild failed */
2249 if (ice_is_reset_in_progress(pf->state) ||
2250 test_bit(ICE_SUSPENDED, pf->state) ||
2251 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2252 ice_service_task_complete(pf);
2256 ice_clean_adminq_subtask(pf);
2257 ice_check_media_subtask(pf);
2258 ice_check_for_hang_subtask(pf);
2259 ice_sync_fltr_subtask(pf);
2260 ice_handle_mdd_event(pf);
2261 ice_watchdog_subtask(pf);
2263 if (ice_is_safe_mode(pf)) {
2264 ice_service_task_complete(pf);
2268 ice_process_vflr_event(pf);
2269 ice_clean_mailboxq_subtask(pf);
2270 ice_clean_sbq_subtask(pf);
2271 ice_sync_arfs_fltrs(pf);
2272 ice_flush_fdir_ctx(pf);
2274 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2275 ice_service_task_complete(pf);
2277 /* If the tasks have taken longer than one service timer period
2278 * or there is more work to be done, reset the service timer to
2279 * schedule the service task now.
2281 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2282 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2283 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2284 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2285 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2286 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2287 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2288 mod_timer(&pf->serv_tmr, jiffies);
2292 * ice_set_ctrlq_len - helper function to set controlq length
2293 * @hw: pointer to the HW instance
2295 static void ice_set_ctrlq_len(struct ice_hw *hw)
2297 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2298 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2299 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2300 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2301 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2302 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2303 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2304 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2305 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2306 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2307 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2308 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2312 * ice_schedule_reset - schedule a reset
2313 * @pf: board private structure
2314 * @reset: reset being requested
2316 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2318 struct device *dev = ice_pf_to_dev(pf);
2320 /* bail out if earlier reset has failed */
2321 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2322 dev_dbg(dev, "earlier reset has failed\n");
2325 /* bail if reset/recovery already in progress */
2326 if (ice_is_reset_in_progress(pf->state)) {
2327 dev_dbg(dev, "Reset already in progress\n");
2331 ice_unplug_aux_dev(pf);
2335 set_bit(ICE_PFR_REQ, pf->state);
2337 case ICE_RESET_CORER:
2338 set_bit(ICE_CORER_REQ, pf->state);
2340 case ICE_RESET_GLOBR:
2341 set_bit(ICE_GLOBR_REQ, pf->state);
2347 ice_service_task_schedule(pf);
2352 * ice_irq_affinity_notify - Callback for affinity changes
2353 * @notify: context as to what irq was changed
2354 * @mask: the new affinity mask
2356 * This is a callback function used by the irq_set_affinity_notifier function
2357 * so that we may register to receive changes to the irq affinity masks.
2360 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2361 const cpumask_t *mask)
2363 struct ice_q_vector *q_vector =
2364 container_of(notify, struct ice_q_vector, affinity_notify);
2366 cpumask_copy(&q_vector->affinity_mask, mask);
2370 * ice_irq_affinity_release - Callback for affinity notifier release
2371 * @ref: internal core kernel usage
2373 * This is a callback function used by the irq_set_affinity_notifier function
2374 * to inform the current notification subscriber that they will no longer
2375 * receive notifications.
2377 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2380 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2381 * @vsi: the VSI being configured
2383 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2385 struct ice_hw *hw = &vsi->back->hw;
2388 ice_for_each_q_vector(vsi, i)
2389 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2396 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2397 * @vsi: the VSI being configured
2398 * @basename: name for the vector
2400 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2402 int q_vectors = vsi->num_q_vectors;
2403 struct ice_pf *pf = vsi->back;
2404 int base = vsi->base_vector;
2411 dev = ice_pf_to_dev(pf);
2412 for (vector = 0; vector < q_vectors; vector++) {
2413 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2415 irq_num = pf->msix_entries[base + vector].vector;
2417 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2418 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2419 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2421 } else if (q_vector->rx.rx_ring) {
2422 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2423 "%s-%s-%d", basename, "rx", rx_int_idx++);
2424 } else if (q_vector->tx.tx_ring) {
2425 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2426 "%s-%s-%d", basename, "tx", tx_int_idx++);
2428 /* skip this unused q_vector */
2431 if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2432 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2433 IRQF_SHARED, q_vector->name,
2436 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2437 0, q_vector->name, q_vector);
2439 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2444 /* register for affinity change notifications */
2445 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2446 struct irq_affinity_notify *affinity_notify;
2448 affinity_notify = &q_vector->affinity_notify;
2449 affinity_notify->notify = ice_irq_affinity_notify;
2450 affinity_notify->release = ice_irq_affinity_release;
2451 irq_set_affinity_notifier(irq_num, affinity_notify);
2454 /* assign the mask for this irq */
2455 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2458 vsi->irqs_ready = true;
2464 irq_num = pf->msix_entries[base + vector].vector;
2465 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2466 irq_set_affinity_notifier(irq_num, NULL);
2467 irq_set_affinity_hint(irq_num, NULL);
2468 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2474 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2475 * @vsi: VSI to setup Tx rings used by XDP
2477 * Return 0 on success and negative value on error
2479 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2481 struct device *dev = ice_pf_to_dev(vsi->back);
2482 struct ice_tx_desc *tx_desc;
2485 ice_for_each_xdp_txq(vsi, i) {
2486 u16 xdp_q_idx = vsi->alloc_txq + i;
2487 struct ice_tx_ring *xdp_ring;
2489 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2492 goto free_xdp_rings;
2494 xdp_ring->q_index = xdp_q_idx;
2495 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2496 xdp_ring->vsi = vsi;
2497 xdp_ring->netdev = NULL;
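/* next_rs marks where the next Report Status bit will be set and next_dd
* where Descriptor Done is checked; both advance in ICE_TX_THRESH-sized
* steps so Tx completion work is handled in batches
*/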
2498 xdp_ring->next_dd = ICE_TX_THRESH - 1;
2499 xdp_ring->next_rs = ICE_TX_THRESH - 1;
2500 xdp_ring->dev = dev;
2501 xdp_ring->count = vsi->num_tx_desc;
2502 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2503 if (ice_setup_tx_ring(xdp_ring))
2504 goto free_xdp_rings;
2505 ice_set_ring_xdp(xdp_ring);
2506 xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
2507 spin_lock_init(&xdp_ring->tx_lock);
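/* pre-mark every descriptor as done so the cleaning logic treats the
* never-used ring the same as a fully completed one
*/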
2508 for (j = 0; j < xdp_ring->count; j++) {
2509 tx_desc = ICE_TX_DESC(xdp_ring, j);
2510 tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
2514 ice_for_each_rxq(vsi, i) {
2515 if (static_key_enabled(&ice_xdp_locking_key))
2516 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2518 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
2525 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2526 ice_free_tx_ring(vsi->xdp_rings[i]);
2531 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2532 * @vsi: VSI to set the bpf prog on
2533 * @prog: the bpf prog pointer
2535 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2537 struct bpf_prog *old_prog;
2540 old_prog = xchg(&vsi->xdp_prog, prog);
2542 bpf_prog_put(old_prog);
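/* propagate the new prog to every Rx ring; WRITE_ONCE pairs with the
* lockless read of xdp_prog on the Rx side
*/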
2544 ice_for_each_rxq(vsi, i)
2545 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2549 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2550 * @vsi: VSI to bring up Tx rings used by XDP
2551 * @prog: bpf program that will be assigned to VSI
2553 * Return 0 on success and negative value on error
2555 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2557 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2558 int xdp_rings_rem = vsi->num_xdp_txq;
2559 struct ice_pf *pf = vsi->back;
2560 struct ice_qs_cfg xdp_qs_cfg = {
2561 .qs_mutex = &pf->avail_q_mutex,
2562 .pf_map = pf->avail_txqs,
2563 .pf_map_size = pf->max_pf_txqs,
2564 .q_count = vsi->num_xdp_txq,
2565 .scatter_count = ICE_MAX_SCATTER_TXQS,
2566 .vsi_map = vsi->txq_map,
2567 .vsi_map_offset = vsi->alloc_txq,
2568 .mapping_mode = ICE_VSI_MAP_CONTIG
2574 dev = ice_pf_to_dev(pf);
2575 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2576 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2577 if (!vsi->xdp_rings)
2580 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2581 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2584 if (static_key_enabled(&ice_xdp_locking_key))
2585 netdev_warn(vsi->netdev,
2586 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2588 if (ice_xdp_alloc_setup_rings(vsi))
2589 goto clear_xdp_rings;
2591 /* follow the logic from ice_vsi_map_rings_to_vectors */
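/* rings are distributed round-robin, e.g. 8 XDP rings over 3 vectors
* gives 3, 3 and 2 rings per vector
*/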
2592 ice_for_each_q_vector(vsi, v_idx) {
2593 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2594 int xdp_rings_per_v, q_id, q_base;
2596 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2597 vsi->num_q_vectors - v_idx);
2598 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2600 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2601 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2603 xdp_ring->q_vector = q_vector;
2604 xdp_ring->next = q_vector->tx.tx_ring;
2605 q_vector->tx.tx_ring = xdp_ring;
2607 xdp_rings_rem -= xdp_rings_per_v;
2610 /* omit the scheduler update if in reset path; XDP queues will be
2611 * taken into account at the end of ice_vsi_rebuild, where
2612 * ice_cfg_vsi_lan is being called
2614 if (ice_is_reset_in_progress(pf->state))
2617 /* tell the Tx scheduler that right now we have more queues */
2620 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2621 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2623 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2626 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2628 goto clear_xdp_rings;
2631 /* assign the prog only when it's not already present on VSI;
2632 * this flow is exercised by both the ethtool -L and ndo_bpf flows;
2633 * VSI rebuild that happens under ethtool -L can expose us to
2634 * the bpf_prog refcount issues as we would be swapping same
2635 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2636 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2637 * this is not harmful as dev_xdp_install bumps the refcount
2638 * before calling the op exposed by the driver;
2640 if (!ice_is_xdp_ena_vsi(vsi))
2641 ice_vsi_assign_bpf_prog(vsi, prog);
2645 ice_for_each_xdp_txq(vsi, i)
2646 if (vsi->xdp_rings[i]) {
2647 kfree_rcu(vsi->xdp_rings[i], rcu);
2648 vsi->xdp_rings[i] = NULL;
2652 mutex_lock(&pf->avail_q_mutex);
2653 ice_for_each_xdp_txq(vsi, i) {
2654 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2655 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2657 mutex_unlock(&pf->avail_q_mutex);
2659 devm_kfree(dev, vsi->xdp_rings);
2664 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2665 * @vsi: VSI to remove XDP rings
2667 * Detach XDP rings from irq vectors, clean up the PF bitmap and free resources
2670 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2672 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2673 struct ice_pf *pf = vsi->back;
2676 /* q_vectors are freed in the reset path so there's no point in detaching
2677 * rings there; if the rebuild was triggered outside of a reset, the reset
2678 * bits in pf->state won't be set, so additionally check the first q_vector against NULL */
2681 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2684 ice_for_each_q_vector(vsi, v_idx) {
2685 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2686 struct ice_tx_ring *ring;
2688 ice_for_each_tx_ring(ring, q_vector->tx)
2689 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2692 /* restore the value of last node prior to XDP setup */
2693 q_vector->tx.tx_ring = ring;
2697 mutex_lock(&pf->avail_q_mutex);
2698 ice_for_each_xdp_txq(vsi, i) {
2699 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2700 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2702 mutex_unlock(&pf->avail_q_mutex);
2704 ice_for_each_xdp_txq(vsi, i)
2705 if (vsi->xdp_rings[i]) {
2706 if (vsi->xdp_rings[i]->desc)
2707 ice_free_tx_ring(vsi->xdp_rings[i]);
2708 kfree_rcu(vsi->xdp_rings[i], rcu);
2709 vsi->xdp_rings[i] = NULL;
2712 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2713 vsi->xdp_rings = NULL;
2715 if (static_key_enabled(&ice_xdp_locking_key))
2716 static_branch_dec(&ice_xdp_locking_key);
2718 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2721 ice_vsi_assign_bpf_prog(vsi, NULL);
2723 /* notify Tx scheduler that we destroyed XDP queues and bring
2724 * back the old number of child nodes
2726 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2727 max_txqs[i] = vsi->num_txq;
2729 /* change number of XDP Tx queues to 0 */
2730 vsi->num_xdp_txq = 0;
2732 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2737 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2738 * @vsi: VSI to schedule napi on
2740 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2744 ice_for_each_rxq(vsi, i) {
2745 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2747 if (rx_ring->xsk_pool)
2748 napi_schedule(&rx_ring->q_vector->napi);
2753 * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
2754 * @vsi: VSI to determine the count of XDP Tx qs
2756 * returns 0 if the available Tx queue count is at least half the CPU count, -ENOMEM otherwise
2759 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2761 u16 avail = ice_get_avail_txq_count(vsi->back);
2762 u16 cpus = num_possible_cpus();
2764 if (avail < cpus / 2)
2767 vsi->num_xdp_txq = min_t(u16, avail, cpus);
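/* fewer XDP Tx queues than CPUs means queues are shared between CPUs;
* enable the static key so XDP_TX/XDP_REDIRECT paths take the ring's
* tx_lock
*/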
2769 if (vsi->num_xdp_txq < cpus)
2770 static_branch_inc(&ice_xdp_locking_key);
2776 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2777 * @vsi: VSI to setup XDP for
2778 * @prog: XDP program
2779 * @extack: netlink extended ack
2782 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2783 struct netlink_ext_ack *extack)
2785 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2786 bool if_running = netif_running(vsi->netdev);
2787 int ret = 0, xdp_ring_err = 0;
2789 if (frame_size > vsi->rx_buf_len) {
2790 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2794 /* need to stop netdev while setting up the program for Rx rings */
2795 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2796 ret = ice_down(vsi);
2798 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2803 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2804 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2806 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2808 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2810 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2812 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2813 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2815 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2817 /* safe to call even when prog == vsi->xdp_prog as
2818 * dev_xdp_install in net/core/dev.c incremented prog's
2819 * refcount so corresponding bpf_prog_put won't cause an underflow */
2822 ice_vsi_assign_bpf_prog(vsi, prog);
2829 ice_vsi_rx_napi_schedule(vsi);
2831 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2835 * ice_xdp_safe_mode - XDP handler for safe mode
2839 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2840 struct netdev_bpf *xdp)
2842 NL_SET_ERR_MSG_MOD(xdp->extack,
2843 "Please provide working DDP firmware package in order to use XDP\n"
2844 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2849 * ice_xdp - implements XDP handler
2853 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2855 struct ice_netdev_priv *np = netdev_priv(dev);
2856 struct ice_vsi *vsi = np->vsi;
2858 if (vsi->type != ICE_VSI_PF) {
2859 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2863 switch (xdp->command) {
2864 case XDP_SETUP_PROG:
2865 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2866 case XDP_SETUP_XSK_POOL:
2867 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2875 * ice_ena_misc_vector - enable the non-queue interrupts
2876 * @pf: board private structure
2878 static void ice_ena_misc_vector(struct ice_pf *pf)
2880 struct ice_hw *hw = &pf->hw;
2883 /* Disable anti-spoof detection interrupt to prevent spurious event
2884 * interrupts during a function reset. Anti-spoof functionality is rearmed again after the function reset. */
2887 val = rd32(hw, GL_MDCK_TX_TDPU);
2888 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2889 wr32(hw, GL_MDCK_TX_TDPU, val);
2891 /* clear things first */
2892 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
2893 rd32(hw, PFINT_OICR); /* read to clear */
2895 val = (PFINT_OICR_ECC_ERR_M |
2896 PFINT_OICR_MAL_DETECT_M |
2898 PFINT_OICR_PCI_EXCEPTION_M |
2900 PFINT_OICR_HMC_ERR_M |
2901 PFINT_OICR_PE_PUSH_M |
2902 PFINT_OICR_PE_CRITERR_M);
2904 wr32(hw, PFINT_OICR_ENA, val);
2906 /* SW_ITR_IDX = 0, but don't change INTENA */
2907 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2908 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2912 * ice_misc_intr - misc interrupt handler
2913 * @irq: interrupt number
2914 * @data: pointer to a q_vector
2916 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2918 struct ice_pf *pf = (struct ice_pf *)data;
2919 struct ice_hw *hw = &pf->hw;
2920 irqreturn_t ret = IRQ_NONE;
2924 dev = ice_pf_to_dev(pf);
2925 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2926 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2927 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2929 oicr = rd32(hw, PFINT_OICR);
2930 ena_mask = rd32(hw, PFINT_OICR_ENA);
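/* every cause handled below is cleared from ena_mask; any bit still set
* at the end is reported as an unexpected interrupt
*/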
2932 if (oicr & PFINT_OICR_SWINT_M) {
2933 ena_mask &= ~PFINT_OICR_SWINT_M;
2937 if (oicr & PFINT_OICR_MAL_DETECT_M) {
2938 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2939 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2941 if (oicr & PFINT_OICR_VFLR_M) {
2942 /* disable any further VFLR event notifications */
2943 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2944 u32 reg = rd32(hw, PFINT_OICR_ENA);
2946 reg &= ~PFINT_OICR_VFLR_M;
2947 wr32(hw, PFINT_OICR_ENA, reg);
2949 ena_mask &= ~PFINT_OICR_VFLR_M;
2950 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2954 if (oicr & PFINT_OICR_GRST_M) {
2957 /* we have a reset warning */
2958 ena_mask &= ~PFINT_OICR_GRST_M;
2959 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2960 GLGEN_RSTAT_RESET_TYPE_S;
2962 if (reset == ICE_RESET_CORER)
2964 else if (reset == ICE_RESET_GLOBR)
2966 else if (reset == ICE_RESET_EMPR)
2969 dev_dbg(dev, "Invalid reset type %d\n", reset);
2971 /* If a reset cycle isn't already in progress, we set a bit in
2972 * pf->state so that the service task can start a reset/rebuild.
2974 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2975 if (reset == ICE_RESET_CORER)
2976 set_bit(ICE_CORER_RECV, pf->state);
2977 else if (reset == ICE_RESET_GLOBR)
2978 set_bit(ICE_GLOBR_RECV, pf->state);
2980 set_bit(ICE_EMPR_RECV, pf->state);
2982 /* There are a couple of different bits at play here.
2983 * hw->reset_ongoing indicates whether the hardware is
2984 * in reset. This is set to true when a reset interrupt
2985 * is received and set back to false after the driver
2986 * has determined that the hardware is out of reset.
2988 * ICE_RESET_OICR_RECV in pf->state indicates
2989 * that a post reset rebuild is required before the
2990 * driver is operational again. This is set above.
2992 * As this is the start of the reset/rebuild cycle, set
2993 * both to indicate that.
2995 hw->reset_ongoing = true;
2999 if (oicr & PFINT_OICR_TSYN_TX_M) {
3000 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3001 ice_ptp_process_ts(pf);
3004 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3005 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3006 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3008 /* Save EVENTs from GLTSYN register */
3009 pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
3010 GLTSYN_STAT_EVENT1_M |
3011 GLTSYN_STAT_EVENT2_M);
3012 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3013 kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
3016 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3017 if (oicr & ICE_AUX_CRIT_ERR) {
3018 struct iidc_event *event;
3020 ena_mask &= ~ICE_AUX_CRIT_ERR;
3021 event = kzalloc(sizeof(*event), GFP_KERNEL);
3023 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
3024 /* report the entire OICR value to AUX driver */
3026 ice_send_event_to_aux(pf, event);
3031 /* Report any remaining unexpected interrupts */
3034 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3035 /* If a critical error is pending there is no choice but to reset the device */
3038 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3039 PFINT_OICR_ECC_ERR_M)) {
3040 set_bit(ICE_PFR_REQ, pf->state);
3041 ice_service_task_schedule(pf);
3046 ice_service_task_schedule(pf);
3047 ice_irq_dynamic_ena(hw, NULL, NULL);
3053 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3054 * @hw: pointer to HW structure
3056 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3058 /* disable Admin queue Interrupt causes */
3059 wr32(hw, PFINT_FW_CTL,
3060 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3062 /* disable Mailbox queue Interrupt causes */
3063 wr32(hw, PFINT_MBX_CTL,
3064 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3066 wr32(hw, PFINT_SB_CTL,
3067 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3069 /* disable Control queue Interrupt causes */
3070 wr32(hw, PFINT_OICR_CTL,
3071 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3077 * ice_free_irq_msix_misc - Unroll misc vector setup
3078 * @pf: board private structure
3080 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3082 struct ice_hw *hw = &pf->hw;
3084 ice_dis_ctrlq_interrupts(hw);
3086 /* disable OICR interrupt */
3087 wr32(hw, PFINT_OICR_ENA, 0);
3090 if (pf->msix_entries) {
3091 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3092 devm_free_irq(ice_pf_to_dev(pf),
3093 pf->msix_entries[pf->oicr_idx].vector, pf);
3096 pf->num_avail_sw_msix += 1;
3097 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3101 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3102 * @hw: pointer to HW structure
3103 * @reg_idx: HW vector index to associate the control queue interrupts with
3105 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3109 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3110 PFINT_OICR_CTL_CAUSE_ENA_M);
3111 wr32(hw, PFINT_OICR_CTL, val);
3113 /* enable Admin queue Interrupt causes */
3114 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3115 PFINT_FW_CTL_CAUSE_ENA_M);
3116 wr32(hw, PFINT_FW_CTL, val);
3118 /* enable Mailbox queue Interrupt causes */
3119 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3120 PFINT_MBX_CTL_CAUSE_ENA_M);
3121 wr32(hw, PFINT_MBX_CTL, val);
3123 /* enable Sideband queue Interrupt causes */
3124 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3125 PFINT_SB_CTL_CAUSE_ENA_M);
3126 wr32(hw, PFINT_SB_CTL, val);
3132 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3133 * @pf: board private structure
3135 * This sets up the handler for MSIX 0, which is used to manage the
3136 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3137 * when in MSI or Legacy interrupt mode.
3139 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3141 struct device *dev = ice_pf_to_dev(pf);
3142 struct ice_hw *hw = &pf->hw;
3143 int oicr_idx, err = 0;
3145 if (!pf->int_name[0])
3146 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3147 dev_driver_string(dev), dev_name(dev));
3149 /* Do not request IRQ but do enable OICR interrupt since settings are
3150 * lost during reset. Note that this function is called only during
3151 * rebuild path and not while reset is in progress.
3153 if (ice_is_reset_in_progress(pf->state))
3156 /* reserve one vector in irq_tracker for misc interrupts */
3157 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3161 pf->num_avail_sw_msix -= 1;
3162 pf->oicr_idx = (u16)oicr_idx;
3164 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
3165 ice_misc_intr, 0, pf->int_name, pf);
3167 dev_err(dev, "devm_request_irq for %s failed: %d\n",
3169 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3170 pf->num_avail_sw_msix += 1;
3175 ice_ena_misc_vector(pf);
3177 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3178 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3179 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3182 ice_irq_dynamic_ena(hw, NULL, NULL);
3188 * ice_napi_add - register NAPI handler for the VSI
3189 * @vsi: VSI for which NAPI handler is to be registered
3191 * This function is only called in the driver's load path. Registering the NAPI
3192 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3193 * reset/rebuild, etc.)
3195 static void ice_napi_add(struct ice_vsi *vsi)
3202 ice_for_each_q_vector(vsi, v_idx)
3203 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3204 ice_napi_poll, NAPI_POLL_WEIGHT);
3208 * ice_set_ops - set netdev and ethtool ops for the given netdev
3209 * @netdev: netdev instance
3211 static void ice_set_ops(struct net_device *netdev)
3213 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3215 if (ice_is_safe_mode(pf)) {
3216 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3217 ice_set_ethtool_safe_mode_ops(netdev);
3221 netdev->netdev_ops = &ice_netdev_ops;
3222 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3223 ice_set_ethtool_ops(netdev);
3227 * ice_set_netdev_features - set features for the given netdev
3228 * @netdev: netdev instance
3230 static void ice_set_netdev_features(struct net_device *netdev)
3232 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3233 netdev_features_t csumo_features;
3234 netdev_features_t vlano_features;
3235 netdev_features_t dflt_features;
3236 netdev_features_t tso_features;
3238 if (ice_is_safe_mode(pf)) {
3240 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3241 netdev->hw_features = netdev->features;
3245 dflt_features = NETIF_F_SG |
3250 csumo_features = NETIF_F_RXCSUM |
3255 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3256 NETIF_F_HW_VLAN_CTAG_TX |
3257 NETIF_F_HW_VLAN_CTAG_RX;
3259 tso_features = NETIF_F_TSO |
3263 NETIF_F_GSO_UDP_TUNNEL |
3264 NETIF_F_GSO_GRE_CSUM |
3265 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3266 NETIF_F_GSO_PARTIAL |
3267 NETIF_F_GSO_IPXIP4 |
3268 NETIF_F_GSO_IPXIP6 |
3271 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3272 NETIF_F_GSO_GRE_CSUM;
3273 /* set features that user can change */
3274 netdev->hw_features = dflt_features | csumo_features |
3275 vlano_features | tso_features;
3277 /* add support for HW_CSUM on packets with MPLS header */
3278 netdev->mpls_features = NETIF_F_HW_CSUM;
3280 /* enable features */
3281 netdev->features |= netdev->hw_features;
3283 netdev->hw_features |= NETIF_F_HW_TC;
3285 /* encap and VLAN devices inherit default, csumo and tso features */
3286 netdev->hw_enc_features |= dflt_features | csumo_features |
3288 netdev->vlan_features |= dflt_features | csumo_features |
3293 * ice_cfg_netdev - Allocate, configure and register a netdev
3294 * @vsi: the VSI associated with the new netdev
3296 * Returns 0 on success, negative value on failure
3298 static int ice_cfg_netdev(struct ice_vsi *vsi)
3300 struct ice_netdev_priv *np;
3301 struct net_device *netdev;
3302 u8 mac_addr[ETH_ALEN];
3304 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3309 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3310 vsi->netdev = netdev;
3311 np = netdev_priv(netdev);
3314 ice_set_netdev_features(netdev);
3316 ice_set_ops(netdev);
3318 if (vsi->type == ICE_VSI_PF) {
3319 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3320 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3321 eth_hw_addr_set(netdev, mac_addr);
3322 ether_addr_copy(netdev->perm_addr, mac_addr);
3325 netdev->priv_flags |= IFF_UNICAST_FLT;
3327 /* Setup netdev TC information */
3328 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3330 /* setup watchdog timeout value to be 5 seconds */
3331 netdev->watchdog_timeo = 5 * HZ;
3333 netdev->min_mtu = ETH_MIN_MTU;
3334 netdev->max_mtu = ICE_MAX_MTU;
3340 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3341 * @lut: Lookup table
3342 * @rss_table_size: Lookup table size
3343 * @rss_size: Range of queue number for hashing
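* e.g. rss_table_size = 8 and rss_size = 3 fills the LUT with 0 1 2 0 1 2 0 1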
3345 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3349 for (i = 0; i < rss_table_size; i++)
3350 lut[i] = i % rss_size;
3354 * ice_pf_vsi_setup - Set up a PF VSI
3355 * @pf: board private structure
3356 * @pi: pointer to the port_info instance
3358 * Returns pointer to the successfully allocated VSI software struct
3359 * on success, otherwise returns NULL on failure.
3361 static struct ice_vsi *
3362 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3364 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
3367 static struct ice_vsi *
3368 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3369 struct ice_channel *ch)
3371 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
3375 * ice_ctrl_vsi_setup - Set up a control VSI
3376 * @pf: board private structure
3377 * @pi: pointer to the port_info instance
3379 * Returns pointer to the successfully allocated VSI software struct
3380 * on success, otherwise returns NULL on failure.
3382 static struct ice_vsi *
3383 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3385 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
3389 * ice_lb_vsi_setup - Set up a loopback VSI
3390 * @pf: board private structure
3391 * @pi: pointer to the port_info instance
3393 * Returns pointer to the successfully allocated VSI software struct
3394 * on success, otherwise returns NULL on failure.
3397 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3399 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
3403 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3404 * @netdev: network interface to be adjusted
3406 * @vid: VLAN ID to be added
3408 * net_device_ops implementation for adding VLAN IDs
3411 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3413 struct ice_netdev_priv *np = netdev_priv(netdev);
3414 struct ice_vsi *vsi = np->vsi;
3415 struct ice_vlan vlan;
3418 /* VLAN 0 is added by default during load/reset */
3422 /* Enable VLAN pruning when a VLAN other than 0 is added */
3423 if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3424 ret = vsi->vlan_ops.ena_rx_filtering(vsi);
3429 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3430 * packets aren't pruned by the device's internal switch on Rx
3432 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3433 ret = vsi->vlan_ops.add_vlan(vsi, &vlan);
3435 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3441 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3442 * @netdev: network interface to be adjusted
3444 * @vid: VLAN ID to be removed
3446 * net_device_ops implementation for removing VLAN IDs
3449 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3451 struct ice_netdev_priv *np = netdev_priv(netdev);
3452 struct ice_vsi *vsi = np->vsi;
3453 struct ice_vlan vlan;
3456 /* don't allow removal of VLAN 0 */
3460 /* Make sure VLAN delete is successful before updating VLAN information */
3463 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3464 ret = vsi->vlan_ops.del_vlan(vsi, &vlan);
3468 /* Disable pruning when VLAN 0 is the only VLAN rule */
3469 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3470 vsi->vlan_ops.dis_rx_filtering(vsi);
3472 set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3477 * ice_rep_indr_tc_block_unbind
3478 * @cb_priv: indirection block private data
3480 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3482 struct ice_indr_block_priv *indr_priv = cb_priv;
3484 list_del(&indr_priv->list);
3489 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3490 * @vsi: VSI struct which has the netdev
3492 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3494 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3496 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3497 ice_rep_indr_tc_block_unbind);
3501 * ice_tc_indir_block_remove - clean indirect TC block notifications
3504 static void ice_tc_indir_block_remove(struct ice_pf *pf)
3506 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3511 ice_tc_indir_block_unregister(pf_vsi);
3515 * ice_tc_indir_block_register - Register TC indirect block notifications
3516 * @vsi: VSI struct which has the netdev
3518 * Returns 0 on success, negative value on failure
3520 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3522 struct ice_netdev_priv *np;
3524 if (!vsi || !vsi->netdev)
3527 np = netdev_priv(vsi->netdev);
3529 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3530 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3534 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3535 * @pf: board private structure
3537 * Returns 0 on success, negative value on failure
3539 static int ice_setup_pf_sw(struct ice_pf *pf)
3541 struct device *dev = ice_pf_to_dev(pf);
3542 struct ice_vsi *vsi;
3545 if (ice_is_reset_in_progress(pf->state))
3548 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3552 /* init channel list */
3553 INIT_LIST_HEAD(&vsi->ch_list);
3555 status = ice_cfg_netdev(vsi);
3557 goto unroll_vsi_setup;
3558 /* netdev has to be configured before setting frame size */
3559 ice_vsi_cfg_frame_size(vsi);
3561 /* init indirect block notifications */
3562 status = ice_tc_indir_block_register(vsi);
3564 dev_err(dev, "Failed to register netdev notifier\n");
3565 goto unroll_cfg_netdev;
3568 /* Setup DCB netlink interface */
3569 ice_dcbnl_setup(vsi);
3571 /* registering the NAPI handler requires both the queues and
3572 * netdev to be created, which are done in ice_pf_vsi_setup()
3573 * and ice_cfg_netdev() respectively
3577 status = ice_set_cpu_rx_rmap(vsi);
3579 dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
3580 vsi->vsi_num, status);
3581 goto unroll_napi_add;
3583 status = ice_init_mac_fltr(pf);
3585 goto free_cpu_rx_map;
3590 ice_free_cpu_rx_rmap(vsi);
3592 ice_tc_indir_block_unregister(vsi);
3597 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3598 free_netdev(vsi->netdev);
3604 ice_vsi_release(vsi);
3609 * ice_get_avail_q_count - Get count of queues in use
3610 * @pf_qmap: bitmap to get queue use count from
3611 * @lock: pointer to a mutex that protects access to pf_qmap
3612 * @size: size of the bitmap
3615 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3621 for_each_clear_bit(bit, pf_qmap, size)
3629 * ice_get_avail_txq_count - Get count of Tx queues in use
3630 * @pf: pointer to an ice_pf instance
3632 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3634 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3639 * ice_get_avail_rxq_count - Get count of Rx queues in use
3640 * @pf: pointer to an ice_pf instance
3642 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3644 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3649 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3650 * @pf: board private structure to initialize
3652 static void ice_deinit_pf(struct ice_pf *pf)
3654 ice_service_task_stop(pf);
3655 mutex_destroy(&pf->sw_mutex);
3656 mutex_destroy(&pf->tc_mutex);
3657 mutex_destroy(&pf->avail_q_mutex);
3659 if (pf->avail_txqs) {
3660 bitmap_free(pf->avail_txqs);
3661 pf->avail_txqs = NULL;
3664 if (pf->avail_rxqs) {
3665 bitmap_free(pf->avail_rxqs);
3666 pf->avail_rxqs = NULL;
3670 ptp_clock_unregister(pf->ptp.clock);
3674 * ice_set_pf_caps - set PF's capability flags
3675 * @pf: pointer to the PF instance
3677 static void ice_set_pf_caps(struct ice_pf *pf)
3679 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3681 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3682 clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3683 if (func_caps->common_cap.rdma) {
3684 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3685 set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3687 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3688 if (func_caps->common_cap.dcb)
3689 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3690 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3691 if (func_caps->common_cap.sr_iov_1_1) {
3692 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3693 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3696 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3697 if (func_caps->common_cap.rss_table_size)
3698 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3700 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3701 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3704 /* ctrl_vsi_idx will be set to a valid value when flow director
3705 * is set up by ice_init_fdir
3707 pf->ctrl_vsi_idx = ICE_NO_VSI;
3708 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3709 /* force guaranteed filter pool for PF */
3710 ice_alloc_fd_guar_item(&pf->hw, &unused,
3711 func_caps->fd_fltr_guar);
3712 /* force shared filter pool for PF */
3713 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3714 func_caps->fd_fltr_best_effort);
3717 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3718 if (func_caps->common_cap.ieee_1588)
3719 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3721 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3722 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3726 * ice_init_pf - Initialize general software structures (struct ice_pf)
3727 * @pf: board private structure to initialize
3729 static int ice_init_pf(struct ice_pf *pf)
3731 ice_set_pf_caps(pf);
3733 mutex_init(&pf->sw_mutex);
3734 mutex_init(&pf->tc_mutex);
3736 INIT_HLIST_HEAD(&pf->aq_wait_list);
3737 spin_lock_init(&pf->aq_wait_lock);
3738 init_waitqueue_head(&pf->aq_wait_queue);
3740 init_waitqueue_head(&pf->reset_wait_queue);
3742 /* setup service timer and periodic service task */
3743 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3744 pf->serv_tmr_period = HZ;
3745 INIT_WORK(&pf->serv_task, ice_service_task);
3746 clear_bit(ICE_SERVICE_SCHED, pf->state);
3748 mutex_init(&pf->avail_q_mutex);
3749 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3750 if (!pf->avail_txqs)
3753 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3754 if (!pf->avail_rxqs) {
3755 devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
3756 pf->avail_txqs = NULL;
3764 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3765 * @pf: board private structure
3767 * Compute the number of MSI-X vectors required (v_budget) and request them
3768 * from the OS. Returns the number of vectors reserved or negative on failure
3770 static int ice_ena_msix_range(struct ice_pf *pf)
3772 int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3773 struct device *dev = ice_pf_to_dev(pf);
3776 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3777 num_cpus = num_online_cpus();
3779 /* reserve for LAN miscellaneous handler */
3780 needed = ICE_MIN_LAN_OICR_MSIX;
3781 if (v_left < needed)
3782 goto no_hw_vecs_left_err;
3786 /* reserve for flow director */
3787 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3788 needed = ICE_FDIR_MSIX;
3789 if (v_left < needed)
3790 goto no_hw_vecs_left_err;
3795 /* reserve for switchdev */
3796 needed = ICE_ESWITCH_MSIX;
3797 if (v_left < needed)
3798 goto no_hw_vecs_left_err;
3802 /* total used for non-traffic vectors */
3805 /* reserve vectors for LAN traffic */
3807 if (v_left < needed)
3808 goto no_hw_vecs_left_err;
3809 pf->num_lan_msix = needed;
3813 /* reserve vectors for RDMA auxiliary driver */
3814 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3815 needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3816 if (v_left < needed)
3817 goto no_hw_vecs_left_err;
3818 pf->num_rdma_msix = needed;
3823 pf->msix_entries = devm_kcalloc(dev, v_budget,
3824 sizeof(*pf->msix_entries), GFP_KERNEL);
3825 if (!pf->msix_entries) {
3830 for (i = 0; i < v_budget; i++)
3831 pf->msix_entries[i].entry = i;
3833 /* actually reserve the vectors */
3834 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3835 ICE_MIN_MSIX, v_budget);
3837 dev_err(dev, "unable to reserve MSI-X vectors\n");
3842 if (v_actual < v_budget) {
3843 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3844 v_budget, v_actual);
3846 if (v_actual < ICE_MIN_MSIX) {
3847 /* error if we can't get minimum vectors */
3848 pci_disable_msix(pf->pdev);
3852 int v_remain = v_actual - v_other;
3853 int v_rdma = 0, v_min_rdma = 0;
3855 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3856 /* Need at least 1 interrupt in addition to AEQ MSIX */
3859 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3860 v_min_rdma = ICE_MIN_RDMA_MSIX;
3863 if (v_actual == ICE_MIN_MSIX ||
3864 v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3865 dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3866 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3868 pf->num_rdma_msix = 0;
3869 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3870 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3871 (v_remain - v_rdma < v_rdma)) {
3872 /* Support minimum RDMA and give remaining
3873 * vectors to LAN MSIX
3875 pf->num_rdma_msix = v_min_rdma;
3876 pf->num_lan_msix = v_remain - v_min_rdma;
3878 /* Split remaining MSIX with RDMA after
3879 * accounting for AEQ MSIX
3881 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3882 ICE_RDMA_NUM_AEQ_MSIX;
3883 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
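/* e.g. assuming ICE_RDMA_NUM_AEQ_MSIX is 4, v_remain = 20 yields
* num_rdma_msix = (20 - 4) / 2 + 4 = 12 and num_lan_msix = 8
*/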
3886 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3889 if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3890 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3898 devm_kfree(dev, pf->msix_entries);
3901 no_hw_vecs_left_err:
3902 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3906 pf->num_rdma_msix = 0;
3907 pf->num_lan_msix = 0;
3912 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3913 * @pf: board private structure
3915 static void ice_dis_msix(struct ice_pf *pf)
3917 pci_disable_msix(pf->pdev);
3918 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3919 pf->msix_entries = NULL;
3923 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3924 * @pf: board private structure
3926 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3930 if (pf->irq_tracker) {
3931 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3932 pf->irq_tracker = NULL;
3937 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3938 * @pf: board private structure to initialize
3940 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3944 vectors = ice_ena_msix_range(pf);
3949 /* set up vector assignment tracking */
3950 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3951 struct_size(pf->irq_tracker, list, vectors),
3953 if (!pf->irq_tracker) {
3958 /* populate SW interrupts pool with number of OS granted IRQs. */
3959 pf->num_avail_sw_msix = (u16)vectors;
3960 pf->irq_tracker->num_entries = (u16)vectors;
3961 pf->irq_tracker->end = pf->irq_tracker->num_entries;
3967 * ice_is_wol_supported - check if WoL is supported
3968 * @hw: pointer to hardware info
3970 * Check if WoL is supported based on the HW configuration.
3971 * Returns true if NVM supports and enables WoL for this port, false otherwise
3973 bool ice_is_wol_supported(struct ice_hw *hw)
3977 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3978 * word) indicates WoL is not supported on the corresponding PF ID.
3980 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3983 return !(BIT(hw->port_info->lport) & wol_ctrl);
3987 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3988 * @vsi: VSI being changed
3989 * @new_rx: new number of Rx queues
3990 * @new_tx: new number of Tx queues
3992 * Only change the number of queues if new_tx or new_rx is non-zero.
3994 * Returns 0 on success.
3996 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3998 struct ice_pf *pf = vsi->back;
3999 int err = 0, timeout = 50;
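/* serialize with any other reconfiguration: retry taking ICE_CFG_BUSY up
* to 50 times, sleeping 1-2 ms per attempt (roughly 50-100 ms in total)
*/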
4001 if (!new_rx && !new_tx)
4004 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4008 usleep_range(1000, 2000);
4012 vsi->req_txq = (u16)new_tx;
4014 vsi->req_rxq = (u16)new_rx;
4016 /* set for the next time the netdev is started */
4017 if (!netif_running(vsi->netdev)) {
4018 ice_vsi_rebuild(vsi, false);
4019 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4024 ice_vsi_rebuild(vsi, false);
4025 ice_pf_dcb_recfg(pf);
4028 clear_bit(ICE_CFG_BUSY, pf->state);
4033 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4034 * @pf: PF to configure
4036 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4037 * VSI can still Tx/Rx VLAN tagged packets.
4039 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4041 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4042 struct ice_vsi_ctx *ctxt;
4049 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4054 ctxt->info = vsi->info;
4056 ctxt->info.valid_sections =
4057 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4058 ICE_AQ_VSI_PROP_SECURITY_VALID |
4059 ICE_AQ_VSI_PROP_SW_VALID);
4061 /* disable VLAN anti-spoof */
4062 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4063 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4065 /* disable VLAN pruning and keep all other settings */
4066 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4068 /* allow all VLANs on Tx and don't strip on Rx */
4069 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
4070 ICE_AQ_VSI_VLAN_EMOD_NOTHING;
4072 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4074 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4075 status, ice_aq_str(hw->adminq.sq_last_status));
4077 vsi->info.sec_flags = ctxt->info.sec_flags;
4078 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4079 vsi->info.vlan_flags = ctxt->info.vlan_flags;
4086 * ice_log_pkg_init - log result of DDP package load
4087 * @hw: pointer to hardware info
4088 * @state: state of package load
4090 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4092 struct ice_pf *pf = hw->back;
4095 dev = ice_pf_to_dev(pf);
4098 case ICE_DDP_PKG_SUCCESS:
4099 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4100 hw->active_pkg_name,
4101 hw->active_pkg_ver.major,
4102 hw->active_pkg_ver.minor,
4103 hw->active_pkg_ver.update,
4104 hw->active_pkg_ver.draft);
4106 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4107 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4108 hw->active_pkg_name,
4109 hw->active_pkg_ver.major,
4110 hw->active_pkg_ver.minor,
4111 hw->active_pkg_ver.update,
4112 hw->active_pkg_ver.draft);
4114 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4115 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4116 hw->active_pkg_name,
4117 hw->active_pkg_ver.major,
4118 hw->active_pkg_ver.minor,
4119 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4121 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4122 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4123 hw->active_pkg_name,
4124 hw->active_pkg_ver.major,
4125 hw->active_pkg_ver.minor,
4126 hw->active_pkg_ver.update,
4127 hw->active_pkg_ver.draft,
4134 case ICE_DDP_PKG_FW_MISMATCH:
4135 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4137 case ICE_DDP_PKG_INVALID_FILE:
4138 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4140 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4141 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4143 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4144 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4145 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4147 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4148 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4150 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4151 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4153 case ICE_DDP_PKG_LOAD_ERROR:
4154 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4155 /* poll for reset to complete */
4156 if (ice_check_reset(hw))
4157 dev_err(dev, "Error resetting device. Please reload the driver\n");
4159 case ICE_DDP_PKG_ERR:
4161 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4167 * ice_load_pkg - load/reload the DDP Package file
4168 * @firmware: firmware structure when firmware requested or NULL for reload
4169 * @pf: pointer to the PF instance
4171 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4172 * initialize HW tables.
4175 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4177 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4178 struct device *dev = ice_pf_to_dev(pf);
4179 struct ice_hw *hw = &pf->hw;
4181 /* Load DDP Package */
4182 if (firmware && !hw->pkg_copy) {
4183 state = ice_copy_and_init_pkg(hw, firmware->data,
4185 ice_log_pkg_init(hw, state);
4186 } else if (!firmware && hw->pkg_copy) {
4187 /* Reload package during rebuild after CORER/GLOBR reset */
4188 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4189 ice_log_pkg_init(hw, state);
4191 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4194 if (!ice_is_init_pkg_successful(state)) {
4196 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4200 /* A successful package download is the precondition for advanced
4201 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4203 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4207 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4208 * @pf: pointer to the PF structure
4210 * There is no error returned here because the driver should be able to handle
4211 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4212 * specifically with Tx.
4214 static void ice_verify_cacheline_size(struct ice_pf *pf)
4216 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4217 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4218 ICE_CACHE_LINE_BYTES);
4222 * ice_send_version - update firmware with driver version
4225 * Returns 0 on success, else error code
4227 static int ice_send_version(struct ice_pf *pf)
4229 struct ice_driver_ver dv;
4231 dv.major_ver = 0xff;
4232 dv.minor_ver = 0xff;
4233 dv.build_ver = 0xff;
4234 dv.subbuild_ver = 0;
4235 strscpy((char *)dv.driver_string, UTS_RELEASE,
4236 sizeof(dv.driver_string));
4237 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4241 * ice_init_fdir - Initialize flow director VSI and configuration
4242 * @pf: pointer to the PF instance
4244 * returns 0 on success, negative on error
4246 static int ice_init_fdir(struct ice_pf *pf)
4248 struct device *dev = ice_pf_to_dev(pf);
4249 struct ice_vsi *ctrl_vsi;
4252 /* Side Band Flow Director needs to have a control VSI.
4253 * Allocate it and store it in the PF.
4255 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4257 dev_dbg(dev, "could not create control VSI\n");
4261 err = ice_vsi_open_ctrl(ctrl_vsi);
4263 dev_dbg(dev, "could not open control VSI\n");
4267 mutex_init(&pf->hw.fdir_fltr_lock);
4269 err = ice_fdir_create_dflt_rules(pf);
4276 ice_fdir_release_flows(&pf->hw);
4277 ice_vsi_close(ctrl_vsi);
4279 ice_vsi_release(ctrl_vsi);
4280 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4281 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4282 pf->ctrl_vsi_idx = ICE_NO_VSI;
4288 * ice_get_opt_fw_name - return optional firmware file name or NULL
4289 * @pf: pointer to the PF instance
4291 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4293 /* Optional firmware name is the same as the default, with an additional
4294 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
4296 struct pci_dev *pdev = pf->pdev;
4297 char *opt_fw_filename;
4300 /* Determine the name of the optional file using the DSN (two
4301 * dwords following the start of the DSN Capability).
4303 dsn = pci_get_dsn(pdev);
4307 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4308 if (!opt_fw_filename)
4311 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4312 ICE_DDP_PKG_PATH, dsn);
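/* e.g. a DSN of 0x0123456789abcdef produces
* "intel/ice/ddp/ice-0123456789abcdef.pkg"
*/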
4314 return opt_fw_filename;
4318 * ice_request_fw - request and load the DDP package file
4319 * @pf: pointer to the PF instance
4321 static void ice_request_fw(struct ice_pf *pf)
4323 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4324 const struct firmware *firmware = NULL;
4325 struct device *dev = ice_pf_to_dev(pf);
4328 /* optional device-specific DDP (if present) overrides the default DDP
4329 * package file. The kernel logs a debug message if the file doesn't exist,
4330 * and warning messages for other errors.
4332 if (opt_fw_filename) {
4333 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4335 kfree(opt_fw_filename);
4339 /* request for firmware was successful. Download to device */
4340 ice_load_pkg(firmware, pf);
4341 kfree(opt_fw_filename);
4342 release_firmware(firmware);
4347 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4349 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4353 /* request for firmware was successful. Download to device */
4354 ice_load_pkg(firmware, pf);
4355 release_firmware(firmware);
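/* Note that ice_request_fw() cannot fail the probe outright: if neither
 * the DSN-specific package nor the default ice.pkg loads, ice_load_pkg()
 * leaves ICE_FLAG_ADV_FEATURES cleared and the driver carries on in Safe
 * Mode.
 */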
4359 * ice_print_wake_reason - show the wake up cause in the log
4360 * @pf: pointer to the PF struct
4362 static void ice_print_wake_reason(struct ice_pf *pf)
4364 u32 wus = pf->wakeup_reason;
4365 const char *wake_str;
4367 /* if no wake event, nothing to print */
4371 if (wus & PFPM_WUS_LNKC_M)
4372 wake_str = "Link\n";
4373 else if (wus & PFPM_WUS_MAG_M)
4374 wake_str = "Magic Packet\n";
4375 else if (wus & PFPM_WUS_MNG_M)
4376 wake_str = "Management\n";
4377 else if (wus & PFPM_WUS_FW_RST_WK_M)
4378 wake_str = "Firmware Reset\n";
4380 wake_str = "Unknown\n";
4382 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4386 * ice_register_netdev - register netdev and devlink port
4387 * @pf: pointer to the PF struct
4389 static int ice_register_netdev(struct ice_pf *pf)
4391 struct ice_vsi *vsi;
4394 vsi = ice_get_main_vsi(pf);
4395 if (!vsi || !vsi->netdev)
4398 err = register_netdev(vsi->netdev);
4400 goto err_register_netdev;
4402 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4403 netif_carrier_off(vsi->netdev);
4404 netif_tx_stop_all_queues(vsi->netdev);
4405 err = ice_devlink_create_pf_port(pf);
4407 goto err_devlink_create;
4409 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4413 unregister_netdev(vsi->netdev);
4414 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4415 err_register_netdev:
4416 free_netdev(vsi->netdev);
4418 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4423 * ice_probe - Device initialization routine
4424 * @pdev: PCI device information struct
4425 * @ent: entry in ice_pci_tbl
4427 * Returns 0 on success, negative on failure
4430 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4432 struct device *dev = &pdev->dev;
4437 if (pdev->is_virtfn) {
4438 dev_err(dev, "can't probe a virtual function\n");
4442 /* this driver uses devres, see
4443 * Documentation/driver-api/driver-model/devres.rst
4445 err = pcim_enable_device(pdev);
4449 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4451 dev_err(dev, "BAR0 I/O map error %d\n", err);
4455 pf = ice_allocate_pf(dev);
4459 /* initialize Auxiliary index to invalid value */
4462 /* set up for high or low DMA */
4463 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4465 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4469 pci_enable_pcie_error_reporting(pdev);
4470 pci_set_master(pdev);
4473 pci_set_drvdata(pdev, pf);
4474 set_bit(ICE_DOWN, pf->state);
4475 /* Disable service task until DOWN bit is cleared */
4476 set_bit(ICE_SERVICE_DIS, pf->state);
4479 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4480 pci_save_state(pdev);
4483 hw->vendor_id = pdev->vendor;
4484 hw->device_id = pdev->device;
4485 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4486 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4487 hw->subsystem_device_id = pdev->subsystem_device;
4488 hw->bus.device = PCI_SLOT(pdev->devfn);
4489 hw->bus.func = PCI_FUNC(pdev->devfn);
4490 ice_set_ctrlq_len(hw);
4492 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4494 #ifndef CONFIG_DYNAMIC_DEBUG
4496 hw->debug_mask = debug;
4499 err = ice_init_hw(hw);
4501 dev_err(dev, "ice_init_hw failed: %d\n", err);
4503 goto err_exit_unroll;
4506 ice_init_feature_support(pf);
4510 /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4511 * set in pf->flags, which will cause ice_is_safe_mode to return true
4514 if (ice_is_safe_mode(pf)) {
4515 /* we already got function/device capabilities but these don't
4516 * reflect what the driver needs to do in safe mode. Instead of
4517 * adding conditional logic everywhere to ignore these
4518 * device/function capabilities, override them.
4520 ice_set_safe_mode_caps(hw);
4523 err = ice_init_pf(pf);
4525 dev_err(dev, "ice_init_pf failed: %d\n", err);
4526 goto err_init_pf_unroll;
4529 ice_devlink_init_regions(pf);
4531 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4532 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4533 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4534 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4536 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4537 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4538 pf->hw.tnl.valid_count[TNL_VXLAN];
4539 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4540 UDP_TUNNEL_TYPE_VXLAN;
4543 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4544 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4545 pf->hw.tnl.valid_count[TNL_GENEVE];
4546 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4547 UDP_TUNNEL_TYPE_GENEVE;
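/* each tunnel type advertised by the DDP package gets its own
 * udp_tunnel_nic table slot, sized from the package's valid_count; the
 * core steers VXLAN/GENEVE port offload requests to the matching table
 */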
4551 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4552 if (!pf->num_alloc_vsi) {
4554 goto err_init_pf_unroll;
4556 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4557 dev_warn(&pf->pdev->dev,
4558 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4559 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4560 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4563 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4567 goto err_init_pf_unroll;
4570 err = ice_init_interrupt_scheme(pf);
4572 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4574 goto err_init_vsi_unroll;
4577 /* In case of MSIX we are going to set up the misc vector right here
4578 * to handle admin queue events etc. In case of legacy and MSI
4579 * the misc functionality and queue processing is combined in
4580 * the same vector and that gets set up at open.
4582 err = ice_req_irq_msix_misc(pf);
4584 dev_err(dev, "setup of misc vector failed: %d\n", err);
4585 goto err_init_interrupt_unroll;
4588 /* create switch struct for the switch element created by FW on boot */
4589 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4590 if (!pf->first_sw) {
4592 goto err_msix_misc_unroll;
4596 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4598 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4600 pf->first_sw->pf = pf;
4602 /* record the sw_id available for later use */
4603 pf->first_sw->sw_id = hw->port_info->sw_id;
4605 err = ice_setup_pf_sw(pf);
4607 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4608 goto err_alloc_sw_unroll;
4611 clear_bit(ICE_SERVICE_DIS, pf->state);
4613 /* tell the firmware we are up */
4614 err = ice_send_version(pf);
4616 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4618 goto err_send_version_unroll;
4621 /* since everything is good, start the service timer */
4622 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4624 err = ice_init_link_events(pf->hw.port_info);
4626 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4627 goto err_send_version_unroll;
4630 /* not a fatal error if this fails */
4631 err = ice_init_nvm_phy_type(pf->hw.port_info);
4633 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4635 /* not a fatal error if this fails */
4636 err = ice_update_link_info(pf->hw.port_info);
4638 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4640 ice_init_link_dflt_override(pf->hw.port_info);
4642 ice_check_link_cfg_err(pf,
4643 pf->hw.port_info->phy.link_info.link_cfg_err);
4645 /* if media available, initialize PHY settings */
4646 if (pf->hw.port_info->phy.link_info.link_info &
4647 ICE_AQ_MEDIA_AVAILABLE) {
4648 /* not a fatal error if this fails */
4649 err = ice_init_phy_user_cfg(pf->hw.port_info);
4651 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4653 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4654 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4657 ice_configure_phy(vsi);
4660 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4663 ice_verify_cacheline_size(pf);
4665 /* Save wakeup reason register for later use */
4666 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4668 /* check for a power management event */
4669 ice_print_wake_reason(pf);
4671 /* clear wake status, all bits */
4672 wr32(hw, PFPM_WUS, U32_MAX);
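/* PFPM_WUS is cleared by writing all ones, the same idiom ice_set_wake()
 * uses before (re)arming wake sources
 */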
4674 /* Disable WoL at init, wait for user to enable */
4675 device_set_wakeup_enable(dev, false);
4677 if (ice_is_safe_mode(pf)) {
4678 ice_set_safe_mode_vlan_cfg(pf);
4682 /* initialize DDP driven features */
4683 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4686 /* Note: Flow director init failure is non-fatal to load */
4687 if (ice_init_fdir(pf))
4688 dev_err(dev, "could not initialize flow director\n");
4690 /* Note: DCB init failure is non-fatal to load */
4691 if (ice_init_pf_dcb(pf, false)) {
4692 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4693 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4695 ice_cfg_lldp_mib_change(&pf->hw, true);
4698 if (ice_init_lag(pf))
4699 dev_warn(dev, "Failed to init link aggregation support\n");
4701 /* print PCI link speed and width */
4702 pcie_print_link_status(pf->pdev);
4705 err = ice_register_netdev(pf);
4707 goto err_netdev_reg;
4709 err = ice_devlink_register_params(pf);
4711 goto err_netdev_reg;
4713 /* ready to go, so clear down state bit */
4714 clear_bit(ICE_DOWN, pf->state);
4715 if (ice_is_aux_ena(pf)) {
4716 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4717 if (pf->aux_idx < 0) {
4718 dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4720 goto err_devlink_reg_param;
4723 err = ice_init_rdma(pf);
4725 dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4727 goto err_init_aux_unroll;
4730 dev_warn(dev, "RDMA is not supported on this device\n");
4733 ice_devlink_register(pf);
4736 err_init_aux_unroll:
4738 ida_free(&ice_aux_ida, pf->aux_idx);
4739 err_devlink_reg_param:
4740 ice_devlink_unregister_params(pf);
4742 err_send_version_unroll:
4743 ice_vsi_release_all(pf);
4744 err_alloc_sw_unroll:
4745 set_bit(ICE_SERVICE_DIS, pf->state);
4746 set_bit(ICE_DOWN, pf->state);
4747 devm_kfree(dev, pf->first_sw);
4748 err_msix_misc_unroll:
4749 ice_free_irq_msix_misc(pf);
4750 err_init_interrupt_unroll:
4751 ice_clear_interrupt_scheme(pf);
4752 err_init_vsi_unroll:
4753 devm_kfree(dev, pf->vsi);
4756 ice_devlink_destroy_regions(pf);
4759 pci_disable_pcie_error_reporting(pdev);
4760 pci_disable_device(pdev);
4765 * ice_set_wake - enable or disable Wake on LAN
4766 * @pf: pointer to the PF struct
4768 * Simple helper for WoL control
4770 static void ice_set_wake(struct ice_pf *pf)
4772 struct ice_hw *hw = &pf->hw;
4773 bool wol = pf->wol_ena;
4775 /* clear wake state, otherwise new wake events won't fire */
4776 wr32(hw, PFPM_WUS, U32_MAX);
4778 /* enable / disable APM wake up, no RMW needed */
4779 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4781 /* set magic packet filter enabled */
4782 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
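/* A minimal usage sketch, assuming the standard ethtool WoL plumbing is
 * what toggles pf->wol_ena:
 *
 *	pf->wol_ena = true;	// e.g. after `ethtool -s <ifname> wol g`
 *	ice_set_wake(pf);	// arm APM wake and the magic packet filter
 *
 * With wol_ena false, the same call writes both registers back to zero.
 */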
4786 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4787 * @pf: pointer to the PF struct
4789 * Issue firmware command to enable multicast magic wake, making
4790 * sure that any locally administered address (LAA) is used for
4791 * wake, and that PF reset doesn't undo the LAA.
4793 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4795 struct device *dev = ice_pf_to_dev(pf);
4796 struct ice_hw *hw = &pf->hw;
4797 u8 mac_addr[ETH_ALEN];
4798 struct ice_vsi *vsi;
4805 vsi = ice_get_main_vsi(pf);
4809 /* Get current MAC address in case it's an LAA */
4811 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4813 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4815 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4816 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4817 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4819 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4821 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
4822 status, ice_aq_str(hw->adminq.sq_last_status));
4826 * ice_remove - Device removal routine
4827 * @pdev: PCI device information struct
4829 static void ice_remove(struct pci_dev *pdev)
4831 struct ice_pf *pf = pci_get_drvdata(pdev);
4834 ice_devlink_unregister(pf);
4835 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4836 if (!ice_is_reset_in_progress(pf->state))
4841 ice_tc_indir_block_remove(pf);
4843 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4844 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4848 ice_service_task_stop(pf);
4850 ice_aq_cancel_waiting_tasks(pf);
4851 ice_unplug_aux_dev(pf);
4852 if (pf->aux_idx >= 0)
4853 ida_free(&ice_aux_ida, pf->aux_idx);
4854 ice_devlink_unregister_params(pf);
4855 set_bit(ICE_DOWN, pf->state);
4857 mutex_destroy(&pf->hw.fdir_fltr_lock);
4859 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4860 ice_ptp_release(pf);
4861 if (!ice_is_safe_mode(pf))
4862 ice_remove_arfs(pf);
4863 ice_setup_mc_magic_wake(pf);
4864 ice_vsi_release_all(pf);
4866 ice_free_irq_msix_misc(pf);
4867 ice_for_each_vsi(pf, i) {
4870 ice_vsi_free_q_vectors(pf->vsi[i]);
4873 ice_devlink_destroy_regions(pf);
4874 ice_deinit_hw(&pf->hw);
4876 /* Issue a PFR as part of the prescribed driver unload flow. Do not
4877 * do it via ice_schedule_reset() since there is no need to rebuild
4878 * and the service task is already stopped.
4880 ice_reset(&pf->hw, ICE_RESET_PFR);
4881 pci_wait_for_pending_transaction(pdev);
4882 ice_clear_interrupt_scheme(pf);
4883 pci_disable_pcie_error_reporting(pdev);
4884 pci_disable_device(pdev);
4888 * ice_shutdown - PCI callback for shutting down device
4889 * @pdev: PCI device information struct
4891 static void ice_shutdown(struct pci_dev *pdev)
4893 struct ice_pf *pf = pci_get_drvdata(pdev);
4897 if (system_state == SYSTEM_POWER_OFF) {
4898 pci_wake_from_d3(pdev, pf->wol_ena);
4899 pci_set_power_state(pdev, PCI_D3hot);
4905 * ice_prepare_for_shutdown - prep for PCI shutdown
4906 * @pf: board private structure
4908 * Inform or close all dependent features in prep for PCI device shutdown
4910 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4912 struct ice_hw *hw = &pf->hw;
4915 /* Notify VFs of impending reset */
4916 if (ice_check_sq_alive(hw, &hw->mailboxq))
4917 ice_vc_notify_reset(pf);
4919 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4921 /* disable the VSIs and their queues that are not already DOWN */
4922 ice_pf_dis_all_vsi(pf, false);
4924 ice_for_each_vsi(pf, v)
4926 pf->vsi[v]->vsi_num = 0;
4928 ice_shutdown_all_ctrlq(hw);
4932 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4933 * @pf: board private structure to reinitialize
4935 * This routine reinitializes the interrupt scheme that was cleared during
4936 * the power management suspend callback.
4938 * This should be called during resume routine to re-allocate the q_vectors
4939 * and reacquire interrupts.
4941 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4943 struct device *dev = ice_pf_to_dev(pf);
4946 /* Since we clear MSIX flag during suspend, we need to
4947 * set it back during resume...
4950 ret = ice_init_interrupt_scheme(pf);
4952 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4956 /* Remap vectors and rings, after successful re-init interrupts */
4957 ice_for_each_vsi(pf, v) {
4961 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4964 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4967 ret = ice_req_irq_msix_misc(pf);
4969 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4979 ice_vsi_free_q_vectors(pf->vsi[v]);
4986 * @dev: generic device information structure
4988 * Power Management callback to quiesce the device and prepare
4989 * for D3 transition.
4991 static int __maybe_unused ice_suspend(struct device *dev)
4993 struct pci_dev *pdev = to_pci_dev(dev);
4997 pf = pci_get_drvdata(pdev);
4999 if (!ice_pf_state_is_nominal(pf)) {
5000 dev_err(dev, "Device is not ready, no need to suspend it\n");
5004 /* Stop watchdog tasks until resume completion.
5005 * Even though it is most likely that the service task is
5006 * disabled if the device is suspended or down, the service task's
5007 * state is controlled by a different state bit, and we should
5008 * store and honor whatever state that bit is in at this point.
5010 disabled = ice_service_task_stop(pf);
5012 ice_unplug_aux_dev(pf);
5014 /* Already suspended? Then there is nothing to do */
5015 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5017 ice_service_task_restart(pf);
5021 if (test_bit(ICE_DOWN, pf->state) ||
5022 ice_is_reset_in_progress(pf->state)) {
5023 dev_err(dev, "can't suspend device in reset or already down\n");
5025 ice_service_task_restart(pf);
5029 ice_setup_mc_magic_wake(pf);
5031 ice_prepare_for_shutdown(pf);
5035 /* Free vectors, clear the interrupt scheme and release IRQs
5036 * for proper hibernation, especially with large number of CPUs.
5037 * Otherwise hibernation might fail when mapping all the vectors back to the hardware.
5040 ice_free_irq_msix_misc(pf);
5041 ice_for_each_vsi(pf, v) {
5044 ice_vsi_free_q_vectors(pf->vsi[v]);
5046 ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
5047 ice_clear_interrupt_scheme(pf);
5049 pci_save_state(pdev);
5050 pci_wake_from_d3(pdev, pf->wol_ena);
5051 pci_set_power_state(pdev, PCI_D3hot);
5056 * ice_resume - PM callback for waking up from D3
5057 * @dev: generic device information structure
5059 static int __maybe_unused ice_resume(struct device *dev)
5061 struct pci_dev *pdev = to_pci_dev(dev);
5062 enum ice_reset_req reset_type;
5067 pci_set_power_state(pdev, PCI_D0);
5068 pci_restore_state(pdev);
5069 pci_save_state(pdev);
5071 if (!pci_device_is_present(pdev))
5074 ret = pci_enable_device_mem(pdev);
5076 dev_err(dev, "Cannot enable device after suspend\n");
5080 pf = pci_get_drvdata(pdev);
5083 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5084 ice_print_wake_reason(pf);
5086 /* We cleared the interrupt scheme when we suspended, so we need to
5087 * restore it now to resume device functionality.
5089 ret = ice_reinit_interrupt_scheme(pf);
5091 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5093 clear_bit(ICE_DOWN, pf->state);
5094 /* Now perform PF reset and rebuild */
5095 reset_type = ICE_RESET_PFR;
5096 /* re-enable service task for reset, but allow reset to schedule it */
5097 clear_bit(ICE_SERVICE_DIS, pf->state);
5099 if (ice_schedule_reset(pf, reset_type))
5100 dev_err(dev, "Reset during resume failed.\n");
5102 clear_bit(ICE_SUSPENDED, pf->state);
5103 ice_service_task_restart(pf);
5105 /* Restart the service task */
5106 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5110 #endif /* CONFIG_PM */
5113 * ice_pci_err_detected - warning that PCI error has been detected
5114 * @pdev: PCI device information struct
5115 * @err: the type of PCI error
5117 * Called to warn that something happened on the PCI bus and the error handling
5118 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5120 static pci_ers_result_t
5121 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5123 struct ice_pf *pf = pci_get_drvdata(pdev);
5126 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5128 return PCI_ERS_RESULT_DISCONNECT;
5131 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5132 ice_service_task_stop(pf);
5134 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5135 set_bit(ICE_PFR_REQ, pf->state);
5136 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5140 return PCI_ERS_RESULT_NEED_RESET;
5144 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5145 * @pdev: PCI device information struct
5147 * Called after a PCI slot reset; performs a register read to determine
5148 * whether the device is recoverable.
5150 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5152 struct ice_pf *pf = pci_get_drvdata(pdev);
5153 pci_ers_result_t result;
5157 err = pci_enable_device_mem(pdev);
5159 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5161 result = PCI_ERS_RESULT_DISCONNECT;
5163 pci_set_master(pdev);
5164 pci_restore_state(pdev);
5165 pci_save_state(pdev);
5166 pci_wake_from_d3(pdev, false);
5168 /* Check for life */
5169 reg = rd32(&pf->hw, GLGEN_RTRIG);
5171 result = PCI_ERS_RESULT_RECOVERED;
5173 result = PCI_ERS_RESULT_DISCONNECT;
5176 err = pci_aer_clear_nonfatal_status(pdev);
5178 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
5180 /* non-fatal, continue */
5186 * ice_pci_err_resume - restart operations after PCI error recovery
5187 * @pdev: PCI device information struct
5189 * Called to allow the driver to bring things back up after PCI error and/or
5190 * reset recovery have finished
5192 static void ice_pci_err_resume(struct pci_dev *pdev)
5194 struct ice_pf *pf = pci_get_drvdata(pdev);
5197 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5202 if (test_bit(ICE_SUSPENDED, pf->state)) {
5203 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5208 ice_restore_all_vfs_msi_state(pdev);
5210 ice_do_reset(pf, ICE_RESET_PFR);
5211 ice_service_task_restart(pf);
5212 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5216 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5217 * @pdev: PCI device information struct
5219 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5221 struct ice_pf *pf = pci_get_drvdata(pdev);
5223 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5224 ice_service_task_stop(pf);
5226 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5227 set_bit(ICE_PFR_REQ, pf->state);
5228 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5234 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5235 * @pdev: PCI device information struct
5237 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5239 ice_pci_err_resume(pdev);
5242 /* ice_pci_tbl - PCI Device ID Table
5244 * Wildcard entries (PCI_ANY_ID) should come last
5245 * Last entry must be all 0s
5247 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5248 * Class, Class Mask, private data (not used) }
5250 static const struct pci_device_id ice_pci_tbl[] = {
5251 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5252 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5253 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5254 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5255 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5256 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5257 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5258 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5259 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5260 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5261 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5262 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5263 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5264 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5265 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5266 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5267 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5268 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5269 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5270 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5271 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5272 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5273 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5274 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5275 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5276 /* required last entry */
5279 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5281 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5283 static const struct pci_error_handlers ice_pci_err_handler = {
5284 .error_detected = ice_pci_err_detected,
5285 .slot_reset = ice_pci_err_slot_reset,
5286 .reset_prepare = ice_pci_err_reset_prepare,
5287 .reset_done = ice_pci_err_reset_done,
5288 .resume = ice_pci_err_resume
5291 static struct pci_driver ice_driver = {
5292 .name = KBUILD_MODNAME,
5293 .id_table = ice_pci_tbl,
5295 .remove = ice_remove,
5297 .driver.pm = &ice_pm_ops,
5298 #endif /* CONFIG_PM */
5299 .shutdown = ice_shutdown,
5300 .sriov_configure = ice_sriov_configure,
5301 .err_handler = &ice_pci_err_handler
5305 * ice_module_init - Driver registration routine
5307 * ice_module_init is the first routine called when the driver is
5308 * loaded. All it does is register with the PCI subsystem.
5310 static int __init ice_module_init(void)
5314 pr_info("%s\n", ice_driver_string);
5315 pr_info("%s\n", ice_copyright);
5317 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
5319 pr_err("Failed to create workqueue\n");
5323 status = pci_register_driver(&ice_driver);
5325 pr_err("failed to register PCI driver, err %d\n", status);
5326 destroy_workqueue(ice_wq);
5331 module_init(ice_module_init);
5334 * ice_module_exit - Driver exit cleanup routine
5336 * ice_module_exit is called just before the driver is removed from memory.
5339 static void __exit ice_module_exit(void)
5341 pci_unregister_driver(&ice_driver);
5342 destroy_workqueue(ice_wq);
5343 pr_info("module unloaded\n");
5345 module_exit(ice_module_exit);
5348 * ice_set_mac_address - NDO callback to set MAC address
5349 * @netdev: network interface device structure
5350 * @pi: pointer to an address structure
5352 * Returns 0 on success, negative on failure
5354 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5356 struct ice_netdev_priv *np = netdev_priv(netdev);
5357 struct ice_vsi *vsi = np->vsi;
5358 struct ice_pf *pf = vsi->back;
5359 struct ice_hw *hw = &pf->hw;
5360 struct sockaddr *addr = pi;
5361 u8 old_mac[ETH_ALEN];
5366 mac = (u8 *)addr->sa_data;
5368 if (!is_valid_ether_addr(mac))
5369 return -EADDRNOTAVAIL;
5371 if (ether_addr_equal(netdev->dev_addr, mac)) {
5372 netdev_dbg(netdev, "already using mac %pM\n", mac);
5376 if (test_bit(ICE_DOWN, pf->state) ||
5377 ice_is_reset_in_progress(pf->state)) {
5378 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5383 if (ice_chnl_dmac_fltr_cnt(pf)) {
5384 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5389 netif_addr_lock_bh(netdev);
5390 ether_addr_copy(old_mac, netdev->dev_addr);
5391 /* change the netdev's MAC address */
5392 eth_hw_addr_set(netdev, mac);
5393 netif_addr_unlock_bh(netdev);
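/* dev_addr is flipped to the new MAC first, under the addr lock, so
 * concurrent readers never observe a half-updated address; if the filter
 * swap below fails, err_update_filters restores old_mac the same way
 */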
5395 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5396 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5397 if (err && err != -ENOENT) {
5398 err = -EADDRNOTAVAIL;
5399 goto err_update_filters;
5402 /* Add filter for new MAC. If filter exists, return success */
5403 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5405 /* Although this MAC filter is already present in hardware it's
5406 * possible in some cases (e.g. bonding) that dev_addr was
5407 * modified outside of the driver and needs to be restored back to this value.
5410 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5412 /* error if the new filter addition failed */
5413 err = -EADDRNOTAVAIL;
5417 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5419 netif_addr_lock_bh(netdev);
5420 eth_hw_addr_set(netdev, old_mac);
5421 netif_addr_unlock_bh(netdev);
5425 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5428 /* write new MAC address to the firmware */
5429 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5430 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5432 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5439 * ice_set_rx_mode - NDO callback to set the netdev filters
5440 * @netdev: network interface device structure
5442 static void ice_set_rx_mode(struct net_device *netdev)
5444 struct ice_netdev_priv *np = netdev_priv(netdev);
5445 struct ice_vsi *vsi = np->vsi;
5450 /* Set the flags to synchronize filters
5451 * ndo_set_rx_mode may be triggered even without a change in netdev flags
5454 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5455 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5456 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5458 /* schedule our worker thread which will take care of
5459 * applying the new filter changes
5461 ice_service_task_schedule(vsi->back);
5465 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5466 * @netdev: network interface device structure
5467 * @queue_index: Queue ID
5468 * @maxrate: maximum bandwidth in Mbps
5471 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5473 struct ice_netdev_priv *np = netdev_priv(netdev);
5474 struct ice_vsi *vsi = np->vsi;
5479 /* Validate maxrate requested is within permitted range */
5480 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5481 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5482 maxrate, queue_index);
5486 q_handle = vsi->tx_rings[queue_index]->q_handle;
5487 tc = ice_dcb_get_tc(vsi, queue_index);
5489 /* Set BW back to default when the user sets maxrate to 0 */
5491 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5492 q_handle, ICE_MAX_BW);
5494 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5495 q_handle, ICE_MAX_BW, maxrate * 1000);
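/* maxrate arrives from the stack in Mbps while the scheduler API takes
 * Kbps, hence the * 1000 above and the ICE_SCHED_MAX_BW / 1000 bound in
 * the validation check
 */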
5497 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5504 * ice_fdb_add - add an entry to the hardware database
5505 * @ndm: the input from the stack
5506 * @tb: pointer to array of nladdr (unused)
5507 * @dev: the net device pointer
5508 * @addr: the MAC address entry being added
5510 * @flags: instructions from stack about fdb operation
5511 * @extack: netlink extended ack
5514 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5515 struct net_device *dev, const unsigned char *addr, u16 vid,
5516 u16 flags, struct netlink_ext_ack __always_unused *extack)
5521 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5524 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5525 netdev_err(dev, "FDB only supports static addresses\n");
5529 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5530 err = dev_uc_add_excl(dev, addr);
5531 else if (is_multicast_ether_addr(addr))
5532 err = dev_mc_add_excl(dev, addr);
5536 /* Only return duplicate errors if NLM_F_EXCL is set */
5537 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5544 * ice_fdb_del - delete an entry from the hardware database
5545 * @ndm: the input from the stack
5546 * @tb: pointer to array of nladdr (unused)
5547 * @dev: the net device pointer
5548 * @addr: the MAC address entry being removed
5552 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5553 struct net_device *dev, const unsigned char *addr,
5554 __always_unused u16 vid)
5558 if (ndm->ndm_state & NUD_PERMANENT) {
5559 netdev_err(dev, "FDB only supports static addresses\n");
5563 if (is_unicast_ether_addr(addr))
5564 err = dev_uc_del(dev, addr);
5565 else if (is_multicast_ether_addr(addr))
5566 err = dev_mc_del(dev, addr);
5574 * ice_set_features - set the netdev feature flags
5575 * @netdev: ptr to the netdev being adjusted
5576 * @features: the feature set that the stack is suggesting
5579 ice_set_features(struct net_device *netdev, netdev_features_t features)
5581 struct ice_netdev_priv *np = netdev_priv(netdev);
5582 struct ice_vsi *vsi = np->vsi;
5583 struct ice_pf *pf = vsi->back;
5586 /* Don't set any netdev advanced features with device in Safe Mode */
5587 if (ice_is_safe_mode(vsi->back)) {
5588 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5592 /* Do not change setting during reset */
5593 if (ice_is_reset_in_progress(pf->state)) {
5594 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5598 /* Multiple features can be changed in one call so keep features in
5599 * separate if/else statements to guarantee each feature is checked
5601 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5602 ice_vsi_manage_rss_lut(vsi, true);
5603 else if (!(features & NETIF_F_RXHASH) &&
5604 netdev->features & NETIF_F_RXHASH)
5605 ice_vsi_manage_rss_lut(vsi, false);
5607 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5608 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5609 ret = vsi->vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
5610 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5611 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5612 ret = vsi->vlan_ops.dis_stripping(vsi);
5614 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5615 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5616 ret = vsi->vlan_ops.ena_insertion(vsi, ETH_P_8021Q);
5617 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5618 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5619 ret = vsi->vlan_ops.dis_insertion(vsi);
5621 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5622 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5623 ret = vsi->vlan_ops.ena_rx_filtering(vsi);
5624 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5625 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5626 ret = vsi->vlan_ops.dis_rx_filtering(vsi);
5628 if ((features & NETIF_F_NTUPLE) &&
5629 !(netdev->features & NETIF_F_NTUPLE)) {
5630 ice_vsi_manage_fdir(vsi, true);
5632 } else if (!(features & NETIF_F_NTUPLE) &&
5633 (netdev->features & NETIF_F_NTUPLE)) {
5634 ice_vsi_manage_fdir(vsi, false);
5635 ice_clear_arfs(vsi);
5638 /* don't turn off hw_tc_offload when ADQ is already enabled */
5639 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
5640 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
5644 if ((features & NETIF_F_HW_TC) &&
5645 !(netdev->features & NETIF_F_HW_TC))
5646 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5648 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
5654 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5655 * @vsi: VSI to setup VLAN properties for
5657 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5661 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5662 ret = vsi->vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
5663 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5664 ret = vsi->vlan_ops.ena_insertion(vsi, ETH_P_8021Q);
5670 * ice_vsi_cfg - Setup the VSI
5671 * @vsi: the VSI being configured
5673 * Return 0 on success and negative value on error
5675 int ice_vsi_cfg(struct ice_vsi *vsi)
5680 ice_set_rx_mode(vsi->netdev);
5682 err = ice_vsi_vlan_setup(vsi);
5687 ice_vsi_cfg_dcb_rings(vsi);
5689 err = ice_vsi_cfg_lan_txqs(vsi);
5690 if (!err && ice_is_xdp_ena_vsi(vsi))
5691 err = ice_vsi_cfg_xdp_txqs(vsi);
5693 err = ice_vsi_cfg_rxqs(vsi);
5698 /* THEORY OF MODERATION:
5699 * The ice driver hardware works differently than the hardware that DIMLIB was
5700 * originally made for. ice hardware doesn't have packet count limits that
5701 * can trigger an interrupt, but it *does* have interrupt rate limit support,
5702 * which is hard-coded to a limit of 250,000 ints/second.
5703 * If not using dynamic moderation, the INTRL value can be modified
5704 * by ethtool rx-usecs-high.
5707 /* the throttle rate for interrupts, i.e. the worst-case delay before
5708 * an initial interrupt fires; the value is stored in microseconds.
5713 /* Make a different profile for Rx that doesn't allow quite so aggressive
5714 * moderation at the high end (it maxes out at 126us or about 8k interrupts a second).
5717 static const struct ice_dim rx_profile[] = {
5718 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
5719 {8}, /* 125,000 ints/s */
5720 {16}, /* 62,500 ints/s */
5721 {62}, /* 16,129 ints/s */
5722 {126} /* 7,936 ints/s */
5725 /* The transmit profile, which has the same sorts of values
5726 * as the previous table
5728 static const struct ice_dim tx_profile[] = {
5729 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
5730 {8}, /* 125,000 ints/s */
5731 {40}, /* 25,000 ints/s */
5732 {128}, /* 7,812 ints/s */
5733 {256} /* 3,906 ints/s */
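/* For both tables the annotated rates follow directly from the ITR value,
 * which is expressed in microseconds:
 *
 *	rate = 1,000,000 / itr		interrupts per second
 *
 * e.g. {8} -> 125,000 ints/s and {126} -> ~7,936 ints/s, before the 250K
 * INTRL cap described above.
 */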
5736 static void ice_tx_dim_work(struct work_struct *work)
5738 struct ice_ring_container *rc;
5742 dim = container_of(work, struct dim, work);
5743 rc = (struct ice_ring_container *)dim->priv;
5745 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
5747 /* look up the values in our local table */
5748 itr = tx_profile[dim->profile_ix].itr;
5750 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
5751 ice_write_itr(rc, itr);
5753 dim->state = DIM_START_MEASURE;
5756 static void ice_rx_dim_work(struct work_struct *work)
5758 struct ice_ring_container *rc;
5762 dim = container_of(work, struct dim, work);
5763 rc = (struct ice_ring_container *)dim->priv;
5765 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
5767 /* look up the values in our local table */
5768 itr = rx_profile[dim->profile_ix].itr;
5770 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
5771 ice_write_itr(rc, itr);
5773 dim->state = DIM_START_MEASURE;
5776 #define ICE_DIM_DEFAULT_PROFILE_IX 1
5779 * ice_init_moderation - set up interrupt moderation
5780 * @q_vector: the vector containing rings to be configured
5782 * Set up interrupt moderation registers, with the intent to do the right
5783 * thing whether called from reset or from probe, and whether or not
5784 * dynamic moderation is enabled. Take special care to write all the
5785 * registers in both modes so that hardware ends up in a known state.
5788 static void ice_init_moderation(struct ice_q_vector *q_vector)
5790 struct ice_ring_container *rc;
5791 bool tx_dynamic, rx_dynamic;
5794 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
5795 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5796 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
5798 tx_dynamic = ITR_IS_DYNAMIC(rc);
5800 /* set the initial TX ITR to match the above */
5801 ice_write_itr(rc, tx_dynamic ?
5802 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
5805 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
5806 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5807 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
5809 rx_dynamic = ITR_IS_DYNAMIC(rc);
5811 /* set the initial RX ITR to match the above */
5812 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
5815 ice_set_q_vector_intrl(q_vector);
5819 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5820 * @vsi: the VSI being configured
5822 static void ice_napi_enable_all(struct ice_vsi *vsi)
5829 ice_for_each_q_vector(vsi, q_idx) {
5830 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5832 ice_init_moderation(q_vector);
5834 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
5835 napi_enable(&q_vector->napi);
5840 * ice_up_complete - Finish the last steps of bringing up a connection
5841 * @vsi: The VSI being configured
5843 * Return 0 on success and negative value on error
5845 static int ice_up_complete(struct ice_vsi *vsi)
5847 struct ice_pf *pf = vsi->back;
5850 ice_vsi_cfg_msix(vsi);
5852 /* Enable only Rx rings, Tx rings were enabled by the FW when the
5853 * Tx queue group list was configured and the context bits were
5854 * programmed using ice_vsi_cfg_txqs
5856 err = ice_vsi_start_all_rx_rings(vsi);
5860 clear_bit(ICE_VSI_DOWN, vsi->state);
5861 ice_napi_enable_all(vsi);
5862 ice_vsi_ena_irq(vsi);
5864 if (vsi->port_info &&
5865 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5867 ice_print_link_msg(vsi, true);
5868 netif_tx_start_all_queues(vsi->netdev);
5869 netif_carrier_on(vsi->netdev);
5870 if (!ice_is_e810(&pf->hw))
5871 ice_ptp_link_change(pf, pf->hw.pf_id, true);
5874 /* clear this now, and the first stats read will be used as baseline */
5875 vsi->stat_offsets_loaded = false;
5877 ice_service_task_schedule(pf);
5883 * ice_up - Bring the connection back up after being down
5884 * @vsi: VSI being configured
5886 int ice_up(struct ice_vsi *vsi)
5890 err = ice_vsi_cfg(vsi);
5892 err = ice_up_complete(vsi);
5898 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5899 * @syncp: pointer to u64_stats_sync
5900 * @stats: stats that pkts and bytes count will be taken from
5901 * @pkts: packets stats counter
5902 * @bytes: bytes stats counter
5904 * This function fetches stats from the ring, using the atomic operations
5905 * that need to be performed to read u64 values on a 32-bit machine.
5908 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
5909 u64 *pkts, u64 *bytes)
5914 start = u64_stats_fetch_begin_irq(syncp);
5916 *bytes = stats.bytes;
5917 } while (u64_stats_fetch_retry_irq(syncp, start));
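/* On 64-bit kernels the seqcount inside u64_stats_sync compiles away and
 * the loop body runs exactly once; on 32-bit the fetch is retried until a
 * consistent pkts/bytes snapshot is read with no concurrent writer.
 */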
5921 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5922 * @vsi: the VSI to be updated
5923 * @vsi_stats: the stats struct to be updated
5924 * @rings: rings to work on
5925 * @count: number of rings
5928 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
5929 struct rtnl_link_stats64 *vsi_stats,
5930 struct ice_tx_ring **rings, u16 count)
5934 for (i = 0; i < count; i++) {
5935 struct ice_tx_ring *ring;
5936 u64 pkts = 0, bytes = 0;
5938 ring = READ_ONCE(rings[i]);
5940 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
5941 vsi_stats->tx_packets += pkts;
5942 vsi_stats->tx_bytes += bytes;
5943 vsi->tx_restart += ring->tx_stats.restart_q;
5944 vsi->tx_busy += ring->tx_stats.tx_busy;
5945 vsi->tx_linearize += ring->tx_stats.tx_linearize;
5950 * ice_update_vsi_ring_stats - Update VSI stats counters
5951 * @vsi: the VSI to be updated
5953 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5955 struct rtnl_link_stats64 *vsi_stats;
5959 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
5963 /* reset non-netdev (extended) stats */
5964 vsi->tx_restart = 0;
5966 vsi->tx_linearize = 0;
5967 vsi->rx_buf_failed = 0;
5968 vsi->rx_page_failed = 0;
5972 /* update Tx rings counters */
5973 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
5976 /* update Rx rings counters */
5977 ice_for_each_rxq(vsi, i) {
5978 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5980 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
5981 vsi_stats->rx_packets += pkts;
5982 vsi_stats->rx_bytes += bytes;
5983 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5984 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5987 /* update XDP Tx rings counters */
5988 if (ice_is_xdp_ena_vsi(vsi))
5989 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
5994 vsi->net_stats.tx_packets = vsi_stats->tx_packets;
5995 vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
5996 vsi->net_stats.rx_packets = vsi_stats->rx_packets;
5997 vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
6003 * ice_update_vsi_stats - Update VSI stats counters
6004 * @vsi: the VSI to be updated
6006 void ice_update_vsi_stats(struct ice_vsi *vsi)
6008 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6009 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6010 struct ice_pf *pf = vsi->back;
6012 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6013 test_bit(ICE_CFG_BUSY, pf->state))
6016 /* get stats as recorded by Tx/Rx rings */
6017 ice_update_vsi_ring_stats(vsi);
6019 /* get VSI stats as recorded by the hardware */
6020 ice_update_eth_stats(vsi);
6022 cur_ns->tx_errors = cur_es->tx_errors;
6023 cur_ns->rx_dropped = cur_es->rx_discards;
6024 cur_ns->tx_dropped = cur_es->tx_discards;
6025 cur_ns->multicast = cur_es->rx_multicast;
6027 /* update some more netdev stats if this is the main VSI */
6028 if (vsi->type == ICE_VSI_PF) {
6029 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6030 cur_ns->rx_errors = pf->stats.crc_errors +
6031 pf->stats.illegal_bytes +
6032 pf->stats.rx_len_errors +
6033 pf->stats.rx_undersize +
6034 pf->hw_csum_rx_error +
6035 pf->stats.rx_jabber +
6036 pf->stats.rx_fragments +
6037 pf->stats.rx_oversize;
6038 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6039 /* record drops from the port level */
6040 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6045 * ice_update_pf_stats - Update PF port stats counters
6046 * @pf: PF whose stats needs to be updated
6048 void ice_update_pf_stats(struct ice_pf *pf)
6050 struct ice_hw_port_stats *prev_ps, *cur_ps;
6051 struct ice_hw *hw = &pf->hw;
6055 port = hw->port_info->lport;
6056 prev_ps = &pf->stats_prev;
6057 cur_ps = &pf->stats;
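/* the ice_stat_update40/32 helpers below diff the live hardware counter
 * against the value cached in prev_ps (handling rollover) and accumulate
 * the delta into cur_ps; the first pass after load only takes a snapshot,
 * gated by stat_prev_loaded
 */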
6059 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6060 &prev_ps->eth.rx_bytes,
6061 &cur_ps->eth.rx_bytes);
6063 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6064 &prev_ps->eth.rx_unicast,
6065 &cur_ps->eth.rx_unicast);
6067 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6068 &prev_ps->eth.rx_multicast,
6069 &cur_ps->eth.rx_multicast);
6071 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6072 &prev_ps->eth.rx_broadcast,
6073 &cur_ps->eth.rx_broadcast);
6075 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6076 &prev_ps->eth.rx_discards,
6077 &cur_ps->eth.rx_discards);
6079 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6080 &prev_ps->eth.tx_bytes,
6081 &cur_ps->eth.tx_bytes);
6083 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6084 &prev_ps->eth.tx_unicast,
6085 &cur_ps->eth.tx_unicast);
6087 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6088 &prev_ps->eth.tx_multicast,
6089 &cur_ps->eth.tx_multicast);
6091 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6092 &prev_ps->eth.tx_broadcast,
6093 &cur_ps->eth.tx_broadcast);
6095 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6096 &prev_ps->tx_dropped_link_down,
6097 &cur_ps->tx_dropped_link_down);
6099 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6100 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6102 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6103 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6105 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6106 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6108 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6109 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6111 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6112 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6114 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6115 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6117 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6118 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6120 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6121 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6123 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6124 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6126 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6127 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6129 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6130 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6132 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6133 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6135 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6136 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6138 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6139 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6141 fd_ctr_base = hw->fd_ctr_base;
6143 ice_stat_update40(hw,
6144 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6145 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6146 &cur_ps->fd_sb_match);
6147 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6148 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6150 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6151 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6153 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6154 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6156 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6157 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6159 ice_update_dcb_stats(pf);
6161 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6162 &prev_ps->crc_errors, &cur_ps->crc_errors);
6164 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6165 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6167 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6168 &prev_ps->mac_local_faults,
6169 &cur_ps->mac_local_faults);
6171 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6172 &prev_ps->mac_remote_faults,
6173 &cur_ps->mac_remote_faults);
6175 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6176 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6178 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6179 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6181 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6182 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6184 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6185 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6187 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6188 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6190 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6192 pf->stat_prev_loaded = true;
6196 * ice_get_stats64 - get statistics for network device structure
6197 * @netdev: network interface device structure
6198 * @stats: main device statistics structure
6201 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6203 struct ice_netdev_priv *np = netdev_priv(netdev);
6204 struct rtnl_link_stats64 *vsi_stats;
6205 struct ice_vsi *vsi = np->vsi;
6207 vsi_stats = &vsi->net_stats;
6209 if (!vsi->num_txq || !vsi->num_rxq)
6212 /* netdev packet/byte stats come from ring counters. These are obtained
6213 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6214 * But only call the update routine and read the registers if the VSI is not down.
6217 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6218 ice_update_vsi_ring_stats(vsi);
6219 stats->tx_packets = vsi_stats->tx_packets;
6220 stats->tx_bytes = vsi_stats->tx_bytes;
6221 stats->rx_packets = vsi_stats->rx_packets;
6222 stats->rx_bytes = vsi_stats->rx_bytes;
6224 /* The rest of the stats can be read from the hardware but instead we
6225 * just return values that the watchdog task has already obtained from the hardware.
6228 stats->multicast = vsi_stats->multicast;
6229 stats->tx_errors = vsi_stats->tx_errors;
6230 stats->tx_dropped = vsi_stats->tx_dropped;
6231 stats->rx_errors = vsi_stats->rx_errors;
6232 stats->rx_dropped = vsi_stats->rx_dropped;
6233 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6234 stats->rx_length_errors = vsi_stats->rx_length_errors;
6238 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6239 * @vsi: VSI having NAPI disabled
6241 static void ice_napi_disable_all(struct ice_vsi *vsi)
6248 ice_for_each_q_vector(vsi, q_idx) {
6249 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6251 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6252 napi_disable(&q_vector->napi);
6254 cancel_work_sync(&q_vector->tx.dim.work);
6255 cancel_work_sync(&q_vector->rx.dim.work);
6260 * ice_down - Shutdown the connection
6261 * @vsi: The VSI being stopped
6263 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
6265 int ice_down(struct ice_vsi *vsi)
6267 int i, tx_err, rx_err, link_err = 0;
6269 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6271 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6272 if (!ice_is_e810(&vsi->back->hw))
6273 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6274 netif_carrier_off(vsi->netdev);
6275 netif_tx_disable(vsi->netdev);
6276 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6277 ice_eswitch_stop_all_tx_queues(vsi->back);
6280 ice_vsi_dis_irq(vsi);
6282 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6284 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6285 vsi->vsi_num, tx_err);
6286 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6287 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6289 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6290 vsi->vsi_num, tx_err);
6293 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6295 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6296 vsi->vsi_num, rx_err);
6298 ice_napi_disable_all(vsi);
6300 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6301 link_err = ice_force_phys_link_state(vsi, false);
6303 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6304 vsi->vsi_num, link_err);
6307 ice_for_each_txq(vsi, i)
6308 ice_clean_tx_ring(vsi->tx_rings[i]);
6310 ice_for_each_rxq(vsi, i)
6311 ice_clean_rx_ring(vsi->rx_rings[i]);
6313 if (tx_err || rx_err || link_err) {
6314 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6315 vsi->vsi_num, vsi->vsw->sw_id);
6323 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6324 * @vsi: VSI having resources allocated
6326 * Return 0 on success, negative on failure
6328 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6332 if (!vsi->num_txq) {
6333 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6338 ice_for_each_txq(vsi, i) {
6339 struct ice_tx_ring *ring = vsi->tx_rings[i];
6345 ring->netdev = vsi->netdev;
6346 err = ice_setup_tx_ring(ring);
6355 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6356 * @vsi: VSI having resources allocated
6358 * Return 0 on success, negative on failure
6360 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6364 if (!vsi->num_rxq) {
6365 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6370 ice_for_each_rxq(vsi, i) {
6371 struct ice_rx_ring *ring = vsi->rx_rings[i];
6377 ring->netdev = vsi->netdev;
6378 err = ice_setup_rx_ring(ring);
6387 * ice_vsi_open_ctrl - open control VSI for use
6388 * @vsi: the VSI to open
6390 * Initialization of the Control VSI
6392 * Returns 0 on success, negative value on error
6394 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6396 char int_name[ICE_INT_NAME_STR_LEN];
6397 struct ice_pf *pf = vsi->back;
6401 dev = ice_pf_to_dev(pf);
6402 /* allocate descriptors */
6403 err = ice_vsi_setup_tx_rings(vsi);
6407 err = ice_vsi_setup_rx_rings(vsi);
6411 err = ice_vsi_cfg(vsi);
6415 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6416 dev_driver_string(dev), dev_name(dev));
6417 err = ice_vsi_req_irq_msix(vsi, int_name);
6421 ice_vsi_cfg_msix(vsi);
6423 err = ice_vsi_start_all_rx_rings(vsi);
6425 goto err_up_complete;
6427 clear_bit(ICE_VSI_DOWN, vsi->state);
6428 ice_vsi_ena_irq(vsi);
6435 ice_vsi_free_rx_rings(vsi);
6437 ice_vsi_free_tx_rings(vsi);
6443 * ice_vsi_open - Called when a network interface is made active
6444 * @vsi: the VSI to open
6446 * Initialization of the VSI
6448 * Returns 0 on success, negative value on error
6450 int ice_vsi_open(struct ice_vsi *vsi)
6452 char int_name[ICE_INT_NAME_STR_LEN];
6453 struct ice_pf *pf = vsi->back;
6456 /* allocate descriptors */
6457 err = ice_vsi_setup_tx_rings(vsi);
6461 err = ice_vsi_setup_rx_rings(vsi);
6465 err = ice_vsi_cfg(vsi);
6469 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6470 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6471 err = ice_vsi_req_irq_msix(vsi, int_name);
6475 if (vsi->type == ICE_VSI_PF) {
6476 /* Notify the stack of the actual queue counts. */
6477 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6481 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6486 err = ice_up_complete(vsi);
6488 goto err_up_complete;
6495 ice_vsi_free_irq(vsi);
6497 ice_vsi_free_rx_rings(vsi);
6499 ice_vsi_free_tx_rings(vsi);
6505 * ice_vsi_release_all - Delete all VSIs
6506 * @pf: PF from which all VSIs are being removed
6508 static void ice_vsi_release_all(struct ice_pf *pf)
6515 ice_for_each_vsi(pf, i) {
6519 if (pf->vsi[i]->type == ICE_VSI_CHNL)
6522 err = ice_vsi_release(pf->vsi[i]);
6524 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6525 i, err, pf->vsi[i]->vsi_num);
6530 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6531 * @pf: pointer to the PF instance
6532 * @type: VSI type to rebuild
6534 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6536 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6538 struct device *dev = ice_pf_to_dev(pf);
6541 ice_for_each_vsi(pf, i) {
6542 struct ice_vsi *vsi = pf->vsi[i];
6544 if (!vsi || vsi->type != type)
6547 /* rebuild the VSI */
6548 err = ice_vsi_rebuild(vsi, true);
6550 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6551 err, vsi->idx, ice_vsi_type_str(type));
6555 /* replay filters for the VSI */
6556 err = ice_replay_vsi(&pf->hw, vsi->idx);
6558 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
6559 err, vsi->idx, ice_vsi_type_str(type));
6563 /* Re-map HW VSI number, using VSI handle that has been
6564 * previously validated in ice_replay_vsi() call above
6566 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6568 /* enable the VSI */
6569 err = ice_ena_vsi(vsi, false);
6571 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6572 err, vsi->idx, ice_vsi_type_str(type));
6576 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6577 ice_vsi_type_str(type));
6584 * ice_update_pf_netdev_link - Update PF netdev link status
6585 * @pf: pointer to the PF instance
6587 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6592 ice_for_each_vsi(pf, i) {
6593 struct ice_vsi *vsi = pf->vsi[i];
6595 if (!vsi || vsi->type != ICE_VSI_PF)
6598 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6600 netif_carrier_on(pf->vsi[i]->netdev);
6601 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6603 netif_carrier_off(pf->vsi[i]->netdev);
6604 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
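/* A minimal sketch (hypothetical helper, not part of the driver) of the
 * carrier/queue pairing used above: carrier state and Tx queue state are
 * always toggled together so the stack stops queueing while link is down.
 */
static void ice_example_set_carrier(struct ice_vsi *vsi, bool link_up)
{
	if (link_up) {
		netif_carrier_on(vsi->netdev);
		netif_tx_wake_all_queues(vsi->netdev);
	} else {
		netif_carrier_off(vsi->netdev);
		netif_tx_stop_all_queues(vsi->netdev);
	}
}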
6610 * ice_rebuild - rebuild after reset
6611 * @pf: PF to rebuild
6612 * @reset_type: type of reset
6614 * Do not rebuild VF VSI in this flow because that is already handled via
6615 * ice_reset_all_vfs(). This is because requirements for resetting a VF after a
6616 * PFR/CORER/GLOBR/etc. are different than the normal flow. Also, we don't want
6617 * to reset/rebuild all the VF VSIs twice.
6619 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6621 struct device *dev = ice_pf_to_dev(pf);
6622 struct ice_hw *hw = &pf->hw;
6625 if (test_bit(ICE_DOWN, pf->state))
6626 goto clear_recovery;
6628 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6630 if (reset_type == ICE_RESET_EMPR) {
6631 /* If an EMP reset has occurred, any previously pending flash
6632 * update will have completed. We no longer know whether or
6633 * not the NVM update EMP reset is restricted.
6635 pf->fw_emp_reset_disabled = false;
6638 err = ice_init_all_ctrlq(hw);
6640 dev_err(dev, "control queues init failed %d\n", err);
6641 goto err_init_ctrlq;
6644 /* if DDP was previously loaded successfully */
6645 if (!ice_is_safe_mode(pf)) {
6646 /* reload the SW DB of filter tables */
6647 if (reset_type == ICE_RESET_PFR)
6648 ice_fill_blk_tbls(hw);
6650 /* Reload DDP Package after CORER/GLOBR reset */
6651 ice_load_pkg(NULL, pf);
6654 err = ice_clear_pf_cfg(hw);
6656 dev_err(dev, "clear PF configuration failed %d\n", err);
6657 goto err_init_ctrlq;
6660 if (pf->first_sw->dflt_vsi_ena)
6661 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6662 /* clear the default VSI configuration if it exists */
6663 pf->first_sw->dflt_vsi = NULL;
6664 pf->first_sw->dflt_vsi_ena = false;
6666 ice_clear_pxe_mode(hw);
6668 err = ice_init_nvm(hw);
6670 dev_err(dev, "ice_init_nvm failed %d\n", err);
6671 goto err_init_ctrlq;
6674 err = ice_get_caps(hw);
6676 dev_err(dev, "ice_get_caps failed %d\n", err);
6677 goto err_init_ctrlq;
6680 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6682 dev_err(dev, "set_mac_cfg failed %d\n", err);
6683 goto err_init_ctrlq;
6686 err = ice_sched_init_port(hw->port_info);
6688 goto err_sched_init_port;
6690 /* start misc vector */
6691 err = ice_req_irq_msix_misc(pf);
6693 dev_err(dev, "misc vector setup failed: %d\n", err);
6694 goto err_sched_init_port;
6697 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6698 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6699 if (!rd32(hw, PFQF_FD_SIZE)) {
6700 u16 unused, guar, b_effort;
6702 guar = hw->func_caps.fd_fltr_guar;
6703 b_effort = hw->func_caps.fd_fltr_best_effort;
6705 /* force guaranteed filter pool for PF */
6706 ice_alloc_fd_guar_item(hw, &unused, guar);
6707 /* force shared filter pool for PF */
6708 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6712 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6713 ice_dcb_rebuild(pf);
6715 /* If the PF previously had enabled PTP, PTP init needs to happen before
6716 * the VSI rebuild. If not, this causes the PTP link status events to
6719 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6722 /* rebuild PF VSI */
6723 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6725 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6726 goto err_vsi_rebuild;
6729 /* configure PTP timestamping after VSI rebuild */
6730 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6731 ice_ptp_cfg_timestamp(pf, false);
6733 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
6735 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
6736 goto err_vsi_rebuild;
6739 if (reset_type == ICE_RESET_PFR) {
6740 err = ice_rebuild_channels(pf);
6742 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
6744 goto err_vsi_rebuild;
6748 /* If Flow Director is active */
6749 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6750 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6752 dev_err(dev, "control VSI rebuild failed: %d\n", err);
6753 goto err_vsi_rebuild;
6756 /* replay HW Flow Director recipes */
6758 ice_fdir_replay_flows(hw);
6760 /* replay Flow Director filters */
6761 ice_fdir_replay_fltrs(pf);
6763 ice_rebuild_arfs(pf);
6766 ice_update_pf_netdev_link(pf);
6768 /* tell the firmware we are up */
6769 err = ice_send_version(pf);
6771 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
6773 goto err_vsi_rebuild;
6776 ice_replay_post(hw);
6778 /* if we get here, reset flow is successful */
6779 clear_bit(ICE_RESET_FAILED, pf->state);
6781 ice_plug_aux_dev(pf);
6785 err_sched_init_port:
6786 ice_sched_cleanup_all(hw);
6788 ice_shutdown_all_ctrlq(hw);
6789 set_bit(ICE_RESET_FAILED, pf->state);
6791 /* set this bit in PF state to control service task scheduling */
6792 set_bit(ICE_NEEDS_RESTART, pf->state);
6793 dev_err(dev, "Rebuild failed, unload and reload driver\n");
6797 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6798 * @vsi: Pointer to VSI structure
6800 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6802 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6803 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6805 return ICE_RXBUF_3072;
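/* A minimal sketch (hypothetical helper): the largest MTU usable with XDP
 * is the frame size above minus the L2 packet header padding, which is
 * exactly the bound enforced in ice_change_mtu() below.
 */
static int ice_example_max_xdp_mtu(struct ice_vsi *vsi)
{
	return ice_max_xdp_frame_size(vsi) - ICE_ETH_PKT_HDR_PAD;
}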
6809 * ice_change_mtu - NDO callback to change the MTU
6810 * @netdev: network interface device structure
6811 * @new_mtu: new value for maximum frame size
6813 * Returns 0 on success, negative on failure
6815 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6817 struct ice_netdev_priv *np = netdev_priv(netdev);
6818 struct ice_vsi *vsi = np->vsi;
6819 struct ice_pf *pf = vsi->back;
6820 struct iidc_event *event;
6824 if (new_mtu == (int)netdev->mtu) {
6825 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6829 if (ice_is_xdp_ena_vsi(vsi)) {
6830 int frame_size = ice_max_xdp_frame_size(vsi);
6832 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6833 netdev_err(netdev, "max MTU for XDP usage is %d\n",
6834 frame_size - ICE_ETH_PKT_HDR_PAD);
6839 /* if a reset is in progress, wait for some time for it to complete */
6841 if (ice_is_reset_in_progress(pf->state)) {
6843 usleep_range(1000, 2000);
6848 } while (count < 100);
6851 netdev_err(netdev, "can't change MTU. Device is busy\n");
6855 event = kzalloc(sizeof(*event), GFP_KERNEL);
6859 set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6860 ice_send_event_to_aux(pf, event);
6861 clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
6863 netdev->mtu = (unsigned int)new_mtu;
6865 /* if VSI is up, bring it down and then back up */
6866 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6867 err = ice_down(vsi);
6869 netdev_err(netdev, "change MTU if_down err %d\n", err);
6875 netdev_err(netdev, "change MTU if_up err %d\n", err);
6880 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6882 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
6883 ice_send_event_to_aux(pf, event);
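/* Example (illustrative): this path is exercised from userspace with,
 * e.g., "ip link set dev <iface> mtu 3000"; the aux-driver events above
 * bracket the actual MTU change.
 */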
6890 * ice_eth_ioctl - Access the hwtstamp interface
6891 * @netdev: network interface device structure
6892 * @ifr: interface request data
6893 * @cmd: ioctl command
6895 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6897 struct ice_netdev_priv *np = netdev_priv(netdev);
6898 struct ice_pf *pf = np->vsi->back;
6902 return ice_ptp_get_ts_config(pf, ifr);
6904 return ice_ptp_set_ts_config(pf, ifr);
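/* Example (illustrative, userspace): the two handlers above sit behind
 * the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls, e.g.:
 *
 *   struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
 *                                  .rx_filter = HWTSTAMP_FILTER_ALL };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */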
6911 * ice_aq_str - convert AQ err code to a string
6912 * @aq_err: the AQ error code to convert
6914 const char *ice_aq_str(enum ice_aq_err aq_err)
6919 case ICE_AQ_RC_EPERM:
6920 return "ICE_AQ_RC_EPERM";
6921 case ICE_AQ_RC_ENOENT:
6922 return "ICE_AQ_RC_ENOENT";
6923 case ICE_AQ_RC_ENOMEM:
6924 return "ICE_AQ_RC_ENOMEM";
6925 case ICE_AQ_RC_EBUSY:
6926 return "ICE_AQ_RC_EBUSY";
6927 case ICE_AQ_RC_EEXIST:
6928 return "ICE_AQ_RC_EEXIST";
6929 case ICE_AQ_RC_EINVAL:
6930 return "ICE_AQ_RC_EINVAL";
6931 case ICE_AQ_RC_ENOSPC:
6932 return "ICE_AQ_RC_ENOSPC";
6933 case ICE_AQ_RC_ENOSYS:
6934 return "ICE_AQ_RC_ENOSYS";
6935 case ICE_AQ_RC_EMODE:
6936 return "ICE_AQ_RC_EMODE";
6937 case ICE_AQ_RC_ENOSEC:
6938 return "ICE_AQ_RC_ENOSEC";
6939 case ICE_AQ_RC_EBADSIG:
6940 return "ICE_AQ_RC_EBADSIG";
6941 case ICE_AQ_RC_ESVN:
6942 return "ICE_AQ_RC_ESVN";
6943 case ICE_AQ_RC_EBADMAN:
6944 return "ICE_AQ_RC_EBADMAN";
6945 case ICE_AQ_RC_EBADBUF:
6946 return "ICE_AQ_RC_EBADBUF";
6949 return "ICE_AQ_RC_UNKNOWN";
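/* A minimal sketch (hypothetical helper) of the logging pattern this
 * converter exists for: pair the kernel errno with the stringified AQ
 * status of the last admin queue send.
 */
static void ice_example_log_aq_err(struct ice_pf *pf, int err)
{
	struct ice_hw *hw = &pf->hw;

	dev_err(ice_pf_to_dev(pf), "AQ command failed, err %d aq_err %s\n",
		err, ice_aq_str(hw->adminq.sq_last_status));
}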
6953 * ice_set_rss_lut - Set RSS LUT
6954 * @vsi: Pointer to VSI structure
6955 * @lut: Lookup table
6956 * @lut_size: Lookup table size
6958 * Returns 0 on success, negative on failure
6960 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6962 struct ice_aq_get_set_rss_lut_params params = {};
6963 struct ice_hw *hw = &vsi->back->hw;
6969 params.vsi_handle = vsi->idx;
6970 params.lut_size = lut_size;
6971 params.lut_type = vsi->rss_lut_type;
6974 status = ice_aq_set_rss_lut(hw, &params);
6976 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
6977 status, ice_aq_str(hw->adminq.sq_last_status));
6983 * ice_set_rss_key - Set RSS key
6984 * @vsi: Pointer to the VSI structure
6985 * @seed: RSS hash seed
6987 * Returns 0 on success, negative on failure
6989 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6991 struct ice_hw *hw = &vsi->back->hw;
6997 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6999 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7000 status, ice_aq_str(hw->adminq.sq_last_status));
7006 * ice_get_rss_lut - Get RSS LUT
7007 * @vsi: Pointer to VSI structure
7008 * @lut: Buffer to store the lookup table entries
7009 * @lut_size: Size of buffer to store the lookup table entries
7011 * Returns 0 on success, negative on failure
7013 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7015 struct ice_aq_get_set_rss_lut_params params = {};
7016 struct ice_hw *hw = &vsi->back->hw;
7022 params.vsi_handle = vsi->idx;
7023 params.lut_size = lut_size;
7024 params.lut_type = vsi->rss_lut_type;
7027 status = ice_aq_get_rss_lut(hw, &params);
7029 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7030 status, ice_aq_str(hw->adminq.sq_last_status));
7036 * ice_get_rss_key - Get RSS key
7037 * @vsi: Pointer to VSI structure
7038 * @seed: Buffer to store the key in
7040 * Returns 0 on success, negative on failure
7042 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7044 struct ice_hw *hw = &vsi->back->hw;
7050 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7052 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7053 status, ice_aq_str(hw->adminq.sq_last_status));
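/* A minimal sketch (hypothetical helper, not part of the driver) tying
 * the RSS accessors together: spread lut_size LUT entries round-robin
 * across num_qs queues and program the result via ice_set_rss_lut().
 */
static int ice_example_fill_rss_lut(struct ice_vsi *vsi, u16 lut_size, u16 num_qs)
{
	u8 *lut;
	int err;
	u16 i;

	if (!num_qs)
		return -EINVAL;

	lut = kzalloc(lut_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* round-robin queue assignment across the whole table */
	for (i = 0; i < lut_size; i++)
		lut[i] = i % num_qs;

	err = ice_set_rss_lut(vsi, lut, lut_size);
	kfree(lut);
	return err;
}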
7059 * ice_bridge_getlink - Get the hardware bridge mode
7062 * @seq: RTNL message seq
7063 * @dev: the netdev being configured
7064 * @filter_mask: filter mask passed in
7065 * @nlflags: netlink flags passed in
7067 * Return the bridge mode (VEB/VEPA)
7070 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7071 struct net_device *dev, u32 filter_mask, int nlflags)
7073 struct ice_netdev_priv *np = netdev_priv(dev);
7074 struct ice_vsi *vsi = np->vsi;
7075 struct ice_pf *pf = vsi->back;
7078 bmode = pf->first_sw->bridge_mode;
7080 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7085 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7086 * @vsi: Pointer to VSI structure
7087 * @bmode: Hardware bridge mode (VEB/VEPA)
7089 * Returns 0 on success, negative on failure
7091 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7093 struct ice_aqc_vsi_props *vsi_props;
7094 struct ice_hw *hw = &vsi->back->hw;
7095 struct ice_vsi_ctx *ctxt;
7098 vsi_props = &vsi->info;
7100 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7104 ctxt->info = vsi->info;
7106 if (bmode == BRIDGE_MODE_VEB)
7107 /* change from VEPA to VEB mode */
7108 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7110 /* change from VEB to VEPA mode */
7111 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7112 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7114 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7116 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7117 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7120 /* Update sw flags for bookkeeping */
7121 vsi_props->sw_flags = ctxt->info.sw_flags;
7129 * ice_bridge_setlink - Set the hardware bridge mode
7130 * @dev: the netdev being configured
7131 * @nlh: RTNL message
7132 * @flags: bridge setlink flags
7133 * @extack: netlink extended ack
7135 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7136 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7137 * not already set) for all VSIs connected to this switch, and also updates the
7138 * unicast switch filter rules for the corresponding switch of the netdev.
7141 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7142 u16 __always_unused flags,
7143 struct netlink_ext_ack __always_unused *extack)
7145 struct ice_netdev_priv *np = netdev_priv(dev);
7146 struct ice_pf *pf = np->vsi->back;
7147 struct nlattr *attr, *br_spec;
7148 struct ice_hw *hw = &pf->hw;
7149 struct ice_sw *pf_sw;
7150 int rem, v, err = 0;
7152 pf_sw = pf->first_sw;
7153 /* find the attribute in the netlink message */
7154 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7156 nla_for_each_nested(attr, br_spec, rem) {
7159 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7161 mode = nla_get_u16(attr);
7162 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7164 /* Continue if bridge mode is not being flipped */
7165 if (mode == pf_sw->bridge_mode)
7167 /* Iterate through the PF VSI list and update the loopback
7170 ice_for_each_vsi(pf, v) {
7173 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7178 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7179 /* Update the unicast switch filter rules for the corresponding
7180 * switch of the netdev
7182 err = ice_update_sw_rule_bridge_mode(hw);
7184 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7186 ice_aq_str(hw->adminq.sq_last_status));
7187 /* revert hw->evb_veb */
7188 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7192 pf_sw->bridge_mode = mode;
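/* Example (illustrative, userspace): the VEB/VEPA flip above is driven
 * from iproute2, e.g.:
 *
 *   bridge link set dev <iface> hwmode vepa
 *   bridge link set dev <iface> hwmode veb
 */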
7199 * ice_tx_timeout - Respond to a Tx Hang
7200 * @netdev: network interface device structure
7201 * @txqueue: Tx queue
7203 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7205 struct ice_netdev_priv *np = netdev_priv(netdev);
7206 struct ice_tx_ring *tx_ring = NULL;
7207 struct ice_vsi *vsi = np->vsi;
7208 struct ice_pf *pf = vsi->back;
7211 pf->tx_timeout_count++;
7213 /* Check if PFC is enabled for the TC to which the queue belongs.
7214 * If yes, then the Tx timeout is not caused by a hung queue; no
7215 * need to reset and rebuild
7217 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7218 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7223 /* now that we have an index, find the tx_ring struct */
7224 ice_for_each_txq(vsi, i)
7225 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7226 if (txqueue == vsi->tx_rings[i]->q_index) {
7227 tx_ring = vsi->tx_rings[i];
7231 /* Reset recovery level if enough time has elapsed after last timeout.
7232 * Also ensure no new reset action happens before next timeout period.
7234 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7235 pf->tx_timeout_recovery_level = 1;
7236 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7237 netdev->watchdog_timeo)))
7241 struct ice_hw *hw = &pf->hw;
7244 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7245 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7246 /* Read interrupt register */
7247 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7249 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7250 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7251 head, tx_ring->next_to_use, val);
7254 pf->tx_timeout_last_recovery = jiffies;
7255 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7256 pf->tx_timeout_recovery_level, txqueue);
7258 switch (pf->tx_timeout_recovery_level) {
7260 set_bit(ICE_PFR_REQ, pf->state);
7263 set_bit(ICE_CORER_REQ, pf->state);
7266 set_bit(ICE_GLOBR_REQ, pf->state);
7269 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7270 set_bit(ICE_DOWN, pf->state);
7271 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7272 set_bit(ICE_SERVICE_DIS, pf->state);
7276 ice_service_task_schedule(pf);
7277 pf->tx_timeout_recovery_level++;
7281 * ice_setup_tc_cls_flower - flower classifier offloads
7282 * @np: net device to configure
7283 * @filter_dev: device on which filter is added
7284 * @cls_flower: offload data
7287 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7288 struct net_device *filter_dev,
7289 struct flow_cls_offload *cls_flower)
7291 struct ice_vsi *vsi = np->vsi;
7293 if (cls_flower->common.chain_index)
7296 switch (cls_flower->command) {
7297 case FLOW_CLS_REPLACE:
7298 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7299 case FLOW_CLS_DESTROY:
7300 return ice_del_cls_flower(vsi, cls_flower);
7307 * ice_setup_tc_block_cb - callback handler registered for TC block
7308 * @type: TC SETUP type
7309 * @type_data: TC flower offload data that contains user input
7310 * @cb_priv: netdev private data
7313 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7315 struct ice_netdev_priv *np = cb_priv;
7318 case TC_SETUP_CLSFLOWER:
7319 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7327 * ice_validate_mqprio_qopt - Validate TCF input parameters
7328 * @vsi: Pointer to VSI
7329 * @mqprio_qopt: input parameters for mqprio queue configuration
7331 * This function validates MQPRIO params, such as qcount (power of 2 wherever
7332 * needed), and makes sure the user doesn't specify a qcount and BW rate limit
7333 * for more TCs than "num_tc"
7336 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7337 struct tc_mqprio_qopt_offload *mqprio_qopt)
7339 u64 sum_max_rate = 0, sum_min_rate = 0;
7340 int non_power_of_2_qcount = 0;
7341 struct ice_pf *pf = vsi->back;
7342 int max_rss_q_cnt = 0;
7347 if (vsi->type != ICE_VSI_PF)
7350 if (mqprio_qopt->qopt.offset[0] != 0 ||
7351 mqprio_qopt->qopt.num_tc < 1 ||
7352 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7355 dev = ice_pf_to_dev(pf);
7356 vsi->ch_rss_size = 0;
7357 num_tc = mqprio_qopt->qopt.num_tc;
7359 for (i = 0; num_tc; i++) {
7360 int qcount = mqprio_qopt->qopt.count[i];
7361 u64 max_rate, min_rate, rem;
7366 if (is_power_of_2(qcount)) {
7367 if (non_power_of_2_qcount &&
7368 qcount > non_power_of_2_qcount) {
7369 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7370 qcount, non_power_of_2_qcount);
7373 if (qcount > max_rss_q_cnt)
7374 max_rss_q_cnt = qcount;
7376 if (non_power_of_2_qcount &&
7377 qcount != non_power_of_2_qcount) {
7378 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7379 qcount, non_power_of_2_qcount);
7382 if (qcount < max_rss_q_cnt) {
7383 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7384 qcount, max_rss_q_cnt);
7387 max_rss_q_cnt = qcount;
7388 non_power_of_2_qcount = qcount;
7391 /* TC command takes input in K/M/Gbps or K/M/Gbit etc but
7392 * converts the bandwidth rate limit into Bytes/s when
7393 * passing it down to the driver. So convert input bandwidth
7394 * from Bytes/s to Kbps
7396 max_rate = mqprio_qopt->max_rate[i];
7397 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7398 sum_max_rate += max_rate;
7400 /* min_rate is minimum guaranteed rate and it can't be zero */
7401 min_rate = mqprio_qopt->min_rate[i];
7402 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7403 sum_min_rate += min_rate;
7405 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7406 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7407 min_rate, ICE_MIN_BW_LIMIT);
7411 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7413 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7414 i, ICE_MIN_BW_LIMIT);
7418 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7420 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7421 i, ICE_MIN_BW_LIMIT);
7425 /* min_rate can't be more than max_rate, except when max_rate
7426 * is zero (implies max_rate sought is max line rate). In such
7427 * a case min_rate can be more than max.
7429 if (max_rate && min_rate > max_rate) {
7430 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7431 min_rate, max_rate);
7435 if (i >= mqprio_qopt->qopt.num_tc - 1)
7437 if (mqprio_qopt->qopt.offset[i + 1] !=
7438 (mqprio_qopt->qopt.offset[i] + qcount))
7442 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7445 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7448 speed = ice_get_link_speed_kbps(vsi);
7449 if (sum_max_rate && sum_max_rate > (u64)speed) {
7450 dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
7451 sum_max_rate, speed);
7454 if (sum_min_rate && sum_min_rate > (u64)speed) {
7455 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7456 sum_min_rate, speed);
7460 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7461 vsi->ch_rss_size = max_rss_q_cnt;
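/* Example (illustrative): an ADQ request that passes the validation
 * above - contiguous offsets, power-of-2 queue counts, and rate limits
 * in whole multiples of the minimum BW granularity:
 *
 *   tc qdisc add dev <iface> root mqprio num_tc 2 map 0 1 \
 *       queues 4@0 4@4 hw 1 mode channel \
 *       shaper bw_rlimit max_rate 1Gbit 2Gbit
 */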
7467 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
7468 * @pf: ptr to PF device
7471 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
7473 struct device *dev = ice_pf_to_dev(pf);
7478 if (!(vsi->num_gfltr || vsi->num_bfltr))
7482 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
7483 struct ice_fd_hw_prof *prof;
7487 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
7488 hw->fdir_prof[flow]->cnt))
7491 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
7492 enum ice_flow_priority prio;
7495 /* add this VSI to FDir profile for this flow */
7496 prio = ICE_FLOW_PRIO_NORMAL;
7497 prof = hw->fdir_prof[flow];
7498 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
7499 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
7500 prof->vsi_h[0], vsi->idx,
7501 prio, prof->fdir_seg[tun],
7504 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
7509 prof->entry_h[prof->cnt][tun] = entry_h;
7512 /* store VSI for filter replay and delete */
7513 prof->vsi_h[prof->cnt] = vsi->idx;
7517 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
7522 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
7528 * ice_add_channel - add a channel by adding VSI
7529 * @pf: ptr to PF device
7530 * @sw_id: underlying HW switching element ID
7531 * @ch: ptr to channel structure
7533 * Add a channel (VSI) using add_vsi and queue_map
7535 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
7537 struct device *dev = ice_pf_to_dev(pf);
7538 struct ice_vsi *vsi;
7540 if (ch->type != ICE_VSI_CHNL) {
7541 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
7545 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
7546 if (!vsi || vsi->type != ICE_VSI_CHNL) {
7547 dev_err(dev, "create chnl VSI failure\n");
7551 ice_add_vsi_to_fdir(pf, vsi);
7554 ch->vsi_num = vsi->vsi_num;
7555 ch->info.mapping_flags = vsi->info.mapping_flags;
7557 /* set the back pointer of channel for newly created VSI */
7560 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
7561 sizeof(vsi->info.q_mapping));
7562 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
7563 sizeof(vsi->info.tc_mapping));
7570 * @vsi: the VSI being setup
7571 * @ch: ptr to channel structure
7573 * Configure channel specific resources such as rings and vectors.
7575 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
7579 for (i = 0; i < ch->num_txq; i++) {
7580 struct ice_q_vector *tx_q_vector, *rx_q_vector;
7581 struct ice_ring_container *rc;
7582 struct ice_tx_ring *tx_ring;
7583 struct ice_rx_ring *rx_ring;
7585 tx_ring = vsi->tx_rings[ch->base_q + i];
7586 rx_ring = vsi->rx_rings[ch->base_q + i];
7587 if (!tx_ring || !rx_ring)
7590 /* setup ring being channel enabled */
7594 /* following code block sets up vector specific attributes */
7595 tx_q_vector = tx_ring->q_vector;
7596 rx_q_vector = rx_ring->q_vector;
7597 if (!tx_q_vector && !rx_q_vector)
7601 tx_q_vector->ch = ch;
7602 /* setup Tx and Rx ITR setting if DIM is off */
7603 rc = &tx_q_vector->tx;
7604 if (!ITR_IS_DYNAMIC(rc))
7605 ice_write_itr(rc, rc->itr_setting);
7608 rx_q_vector->ch = ch;
7609 /* setup Tx and Rx ITR setting if DIM is off */
7610 rc = &rx_q_vector->rx;
7611 if (!ITR_IS_DYNAMIC(rc))
7612 ice_write_itr(rc, rc->itr_setting);
7616 /* it is safe to assume that, if the channel has a non-zero num_txq or
7617 * num_rxq, then the GLINT_ITR register would have been written to
7618 * perform an in-context update, hence perform a flush
7620 if (ch->num_txq || ch->num_rxq)
7621 ice_flush(&vsi->back->hw);
7625 * ice_cfg_chnl_all_res - configure channel resources
7626 * @vsi: ptr to main_vsi
7627 * @ch: ptr to channel structure
7629 * This function configures channel specific resources such as flow-director
7630 * counter index, and other resources such as queues, vectors, ITR settings
7633 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
7635 /* configure channel (aka ADQ) resources such as queues, vectors,
7636 * ITR settings for channel specific vectors and anything else
7638 ice_chnl_cfg_res(vsi, ch);
7642 * ice_setup_hw_channel - setup new channel
7643 * @pf: ptr to PF device
7644 * @vsi: the VSI being setup
7645 * @ch: ptr to channel structure
7646 * @sw_id: underlying HW switching element ID
7647 * @type: type of channel to be created (VMDq2/VF)
7649 * Setup new channel (VSI) based on specified type (VMDq2/VF)
7650 * and configures Tx rings accordingly
7653 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
7654 struct ice_channel *ch, u16 sw_id, u8 type)
7656 struct device *dev = ice_pf_to_dev(pf);
7659 ch->base_q = vsi->next_base_q;
7662 ret = ice_add_channel(pf, sw_id, ch);
7664 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
7668 /* configure/setup ADQ specific resources */
7669 ice_cfg_chnl_all_res(vsi, ch);
7671 /* make sure to update the next_base_q so that subsequent channel's
7672 * (aka ADQ) VSI queue map is correct
7674 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
7675 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
7682 * ice_setup_channel - setup new channel using uplink element
7683 * @pf: ptr to PF device
7684 * @vsi: the VSI being setup
7685 * @ch: ptr to channel structure
7687 * Setup new channel (VSI) based on specified type (VMDq2/VF)
7688 * and uplink switching element
7691 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
7692 struct ice_channel *ch)
7694 struct device *dev = ice_pf_to_dev(pf);
7698 if (vsi->type != ICE_VSI_PF) {
7699 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
7703 sw_id = pf->first_sw->sw_id;
7705 /* create channel (VSI) */
7706 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
7708 dev_err(dev, "failed to setup hw_channel\n");
7711 dev_dbg(dev, "successfully created channel()\n");
7713 return ch->ch_vsi ? true : false;
7717 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
7718 * @vsi: VSI to be configured
7719 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
7720 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
7723 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
7727 err = ice_set_min_bw_limit(vsi, min_tx_rate);
7731 return ice_set_max_bw_limit(vsi, max_tx_rate);
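/* Example (illustrative): cap a channel VSI at 500 Mbit/s with no
 * guaranteed floor - both rates are expressed in Kbps at this layer:
 *
 *   err = ice_set_bw_limit(ch->ch_vsi, 500000, 0);
 */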
7735 * ice_create_q_channel - function to create channel
7736 * @vsi: VSI to be configured
7737 * @ch: ptr to channel (it contains channel specific params)
7739 * This function creates a channel (VSI) using the num_queues specified by user,
7740 * and reconfigures RSS if needed.
7742 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
7744 struct ice_pf *pf = vsi->back;
7750 dev = ice_pf_to_dev(pf);
7751 if (!ch->num_txq || !ch->num_rxq) {
7752 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
7756 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
7757 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
7758 vsi->cnt_q_avail, ch->num_txq);
7762 if (!ice_setup_channel(pf, vsi, ch)) {
7763 dev_info(dev, "Failed to setup channel\n");
7766 /* configure BW rate limit */
7767 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
7770 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
7773 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
7774 ch->max_tx_rate, ch->ch_vsi->vsi_num);
7776 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
7777 ch->max_tx_rate, ch->ch_vsi->vsi_num);
7780 vsi->cnt_q_avail -= ch->num_txq;
7786 * ice_rem_all_chnl_fltrs - removes all channel filters
7787 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
7789 * Remove all advanced switch filters only if they are channel-specific
7790 * tc-flower based filters
7792 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
7794 struct ice_tc_flower_fltr *fltr;
7795 struct hlist_node *node;
7797 /* to remove all channel filters, iterate an ordered list of filters */
7798 hlist_for_each_entry_safe(fltr, node,
7799 &pf->tc_flower_fltr_list,
7801 struct ice_rule_query_data rule;
7804 /* for now process only channel specific filters */
7805 if (!ice_is_chnl_fltr(fltr))
7808 rule.rid = fltr->rid;
7809 rule.rule_id = fltr->rule_id;
7810 rule.vsi_handle = fltr->dest_id;
7811 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
7813 if (status == -ENOENT)
7814 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
7817 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
7819 } else if (fltr->dest_vsi) {
7820 /* update advanced switch filter count */
7821 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
7822 u32 flags = fltr->flags;
7824 fltr->dest_vsi->num_chnl_fltr--;
7825 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
7826 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
7827 pf->num_dmac_chnl_fltrs--;
7831 hlist_del(&fltr->tc_flower_node);
7837 * ice_remove_q_channels - Remove queue channels for the TCs
7838 * @vsi: VSI to be configured
7839 * @rem_fltr: delete advanced switch filter or not
7841 * Remove queue channels for the TCs
7843 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
7845 struct ice_channel *ch, *ch_tmp;
7846 struct ice_pf *pf = vsi->back;
7849 /* remove all tc-flower based filters if they are channel filters only */
7851 ice_rem_all_chnl_fltrs(pf);
7853 /* remove ntuple filters since queue configuration is being changed */
7854 if (vsi->netdev->features & NETIF_F_NTUPLE) {
7855 struct ice_hw *hw = &pf->hw;
7857 mutex_lock(&hw->fdir_fltr_lock);
7858 ice_fdir_del_all_fltrs(vsi);
7859 mutex_unlock(&hw->fdir_fltr_lock);
7862 /* perform cleanup for channels if they exist */
7863 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
7864 struct ice_vsi *ch_vsi;
7866 list_del(&ch->list);
7867 ch_vsi = ch->ch_vsi;
7873 /* Reset queue contexts */
7874 for (i = 0; i < ch->num_rxq; i++) {
7875 struct ice_tx_ring *tx_ring;
7876 struct ice_rx_ring *rx_ring;
7878 tx_ring = vsi->tx_rings[ch->base_q + i];
7879 rx_ring = vsi->rx_rings[ch->base_q + i];
7882 if (tx_ring->q_vector)
7883 tx_ring->q_vector->ch = NULL;
7887 if (rx_ring->q_vector)
7888 rx_ring->q_vector->ch = NULL;
7892 /* Release FD resources for the channel VSI */
7893 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
7895 /* clear the VSI from scheduler tree */
7896 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
7898 /* Delete VSI from FW */
7899 ice_vsi_delete(ch->ch_vsi);
7901 /* Delete VSI from PF and HW VSI arrays */
7902 ice_vsi_clear(ch->ch_vsi);
7904 /* free the channel */
7908 /* clear the channel VSI map which is stored in main VSI */
7909 ice_for_each_chnl_tc(i)
7910 vsi->tc_map_vsi[i] = NULL;
7912 /* reset main VSI's all TC information */
7918 * ice_rebuild_channels - rebuild channels
7921 * Recreate channel VSIs and replay filters
7923 static int ice_rebuild_channels(struct ice_pf *pf)
7925 struct device *dev = ice_pf_to_dev(pf);
7926 struct ice_vsi *main_vsi;
7927 bool rem_adv_fltr = true;
7928 struct ice_channel *ch;
7929 struct ice_vsi *vsi;
7933 main_vsi = ice_get_main_vsi(pf);
7937 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
7938 main_vsi->old_numtc == 1)
7939 return 0; /* nothing to be done */
7941 /* reconfigure main VSI based on old value of TC and cached values
7944 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
7946 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
7947 main_vsi->old_ena_tc, main_vsi->vsi_num);
7951 /* rebuild ADQ VSIs */
7952 ice_for_each_vsi(pf, i) {
7953 enum ice_vsi_type type;
7956 if (!vsi || vsi->type != ICE_VSI_CHNL)
7961 /* rebuild ADQ VSI */
7962 err = ice_vsi_rebuild(vsi, true);
7964 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
7965 ice_vsi_type_str(type), vsi->idx, err);
7969 /* Re-map HW VSI number, using VSI handle that has been
7970 * previously validated in ice_replay_vsi() call above
7972 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7974 /* replay filters for the VSI */
7975 err = ice_replay_vsi(&pf->hw, vsi->idx);
7977 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
7978 ice_vsi_type_str(type), err, vsi->idx);
7979 rem_adv_fltr = false;
7982 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
7983 ice_vsi_type_str(type), vsi->idx);
7985 /* store ADQ VSI at correct TC index in main VSI's
7988 main_vsi->tc_map_vsi[tc_idx++] = vsi;
7991 /* ADQ VSI(s) have been rebuilt successfully, so set up the
7992 * channel for main VSI's Tx and Rx rings
7994 list_for_each_entry(ch, &main_vsi->ch_list, list) {
7995 struct ice_vsi *ch_vsi;
7997 ch_vsi = ch->ch_vsi;
8001 /* reconfig channel resources */
8002 ice_cfg_chnl_all_res(main_vsi, ch);
8004 /* replay BW rate limit if it is non-zero */
8005 if (!ch->max_tx_rate && !ch->min_tx_rate)
8008 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8011 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8012 err, ch->max_tx_rate, ch->min_tx_rate,
8015 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8016 ch->max_tx_rate, ch->min_tx_rate,
8020 /* reconfig RSS for main VSI */
8021 if (main_vsi->ch_rss_size)
8022 ice_vsi_cfg_rss_lut_key(main_vsi);
8027 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8032 * ice_create_q_channels - Add queue channel for the given TCs
8033 * @vsi: VSI to be configured
8035 * Configures queue channel mapping to the given TCs
8037 static int ice_create_q_channels(struct ice_vsi *vsi)
8039 struct ice_pf *pf = vsi->back;
8040 struct ice_channel *ch;
8043 ice_for_each_chnl_tc(i) {
8044 if (!(vsi->all_enatc & BIT(i)))
8047 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8052 INIT_LIST_HEAD(&ch->list);
8053 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8054 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8055 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8056 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8057 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8059 /* convert to Kbits/s */
8060 if (ch->max_tx_rate)
8061 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8062 ICE_BW_KBPS_DIVISOR);
8063 if (ch->min_tx_rate)
8064 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8065 ICE_BW_KBPS_DIVISOR);
8067 ret = ice_create_q_channel(vsi, ch);
8069 dev_err(ice_pf_to_dev(pf),
8070 "failed creating channel TC:%d\n", i);
8074 list_add_tail(&ch->list, &vsi->ch_list);
8075 vsi->tc_map_vsi[i] = ch->ch_vsi;
8076 dev_dbg(ice_pf_to_dev(pf),
8077 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8082 ice_remove_q_channels(vsi, false);
8088 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8089 * @netdev: net device to configure
8090 * @type_data: TC offload data
8092 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8094 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8095 struct ice_netdev_priv *np = netdev_priv(netdev);
8096 struct ice_vsi *vsi = np->vsi;
8097 struct ice_pf *pf = vsi->back;
8098 u16 mode, ena_tc_qdisc = 0;
8099 int cur_txq, cur_rxq;
8104 dev = ice_pf_to_dev(pf);
8105 num_tcf = mqprio_qopt->qopt.num_tc;
8106 hw = mqprio_qopt->qopt.hw;
8107 mode = mqprio_qopt->mode;
8109 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8110 vsi->ch_rss_size = 0;
8111 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8115 /* Generate queue region map for number of TCF requested */
8116 for (i = 0; i < num_tcf; i++)
8117 ena_tc_qdisc |= BIT(i);
8120 case TC_MQPRIO_MODE_CHANNEL:
8122 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8124 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8128 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8129 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8130 /* don't assume state of hw_tc_offload during driver load
8131 * and set the flag for TC flower filter if hw_tc_offload
8134 if (vsi->netdev->features & NETIF_F_HW_TC)
8135 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8143 /* Requesting same TCF configuration as already enabled */
8144 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8145 mode != TC_MQPRIO_MODE_CHANNEL)
8148 /* Pause VSI queues */
8149 ice_dis_vsi(vsi, true);
8151 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8152 ice_remove_q_channels(vsi, true);
8154 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8155 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8157 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8160 /* logic to rebuild VSI, same as ethtool -L */
8161 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8163 for (i = 0; i < num_tcf; i++) {
8164 if (!(ena_tc_qdisc & BIT(i)))
8167 offset = vsi->mqprio_qopt.qopt.offset[i];
8168 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8169 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8171 vsi->req_txq = offset + qcount_tx;
8172 vsi->req_rxq = offset + qcount_rx;
8174 /* store away original rss_size info, so that it gets reused
8175 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
8176 * determine what the rss_size should be for the main VSI
8178 vsi->orig_rss_size = vsi->rss_size;
8181 /* save current values of Tx and Rx queues before calling VSI rebuild
8182 * for fallback option
8184 cur_txq = vsi->num_txq;
8185 cur_rxq = vsi->num_rxq;
8187 /* proceed with rebuild main VSI using correct number of queues */
8188 ret = ice_vsi_rebuild(vsi, false);
8190 /* fallback to current number of queues */
8191 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8192 vsi->req_txq = cur_txq;
8193 vsi->req_rxq = cur_rxq;
8194 clear_bit(ICE_RESET_FAILED, pf->state);
8195 if (ice_vsi_rebuild(vsi, false)) {
8196 dev_err(dev, "Rebuild of main VSI failed again\n");
8201 vsi->all_numtc = num_tcf;
8202 vsi->all_enatc = ena_tc_qdisc;
8203 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8205 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8210 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8211 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8212 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8214 /* set TC0 rate limit if specified */
8215 if (max_tx_rate || min_tx_rate) {
8216 /* convert to Kbits/s */
8218 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8220 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8222 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8224 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8225 max_tx_rate, min_tx_rate, vsi->vsi_num);
8227 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8228 max_tx_rate, min_tx_rate, vsi->vsi_num);
8232 ret = ice_create_q_channels(vsi);
8234 netdev_err(netdev, "failed configuring queue channels\n");
8237 netdev_dbg(netdev, "successfully configured channels\n");
8241 if (vsi->ch_rss_size)
8242 ice_vsi_cfg_rss_lut_key(vsi);
8245 /* if error, reset the all_numtc and all_enatc */
8251 ice_ena_vsi(vsi, true);
8256 static LIST_HEAD(ice_block_cb_list);
8259 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8262 struct ice_netdev_priv *np = netdev_priv(netdev);
8263 struct ice_pf *pf = np->vsi->back;
8267 case TC_SETUP_BLOCK:
8268 return flow_block_cb_setup_simple(type_data,
8270 ice_setup_tc_block_cb,
8272 case TC_SETUP_QDISC_MQPRIO:
8273 /* setup traffic classifier for receive side */
8274 mutex_lock(&pf->tc_mutex);
8275 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8276 mutex_unlock(&pf->tc_mutex);
8284 static struct ice_indr_block_priv *
8285 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8286 struct net_device *netdev)
8288 struct ice_indr_block_priv *cb_priv;
8290 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8291 if (!cb_priv->netdev)
8293 if (cb_priv->netdev == netdev)
8300 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8303 struct ice_indr_block_priv *priv = indr_priv;
8304 struct ice_netdev_priv *np = priv->np;
8307 case TC_SETUP_CLSFLOWER:
8308 return ice_setup_tc_cls_flower(np, priv->netdev,
8309 (struct flow_cls_offload *)
8317 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8318 struct ice_netdev_priv *np,
8319 struct flow_block_offload *f, void *data,
8320 void (*cleanup)(struct flow_block_cb *block_cb))
8322 struct ice_indr_block_priv *indr_priv;
8323 struct flow_block_cb *block_cb;
8325 if (!ice_is_tunnel_supported(netdev) &&
8326 !(is_vlan_dev(netdev) &&
8327 vlan_dev_real_dev(netdev) == np->vsi->netdev))
8330 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8333 switch (f->command) {
8334 case FLOW_BLOCK_BIND:
8335 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8339 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8343 indr_priv->netdev = netdev;
8345 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8348 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8349 indr_priv, indr_priv,
8350 ice_rep_indr_tc_block_unbind,
8351 f, netdev, sch, data, np,
8354 if (IS_ERR(block_cb)) {
8355 list_del(&indr_priv->list);
8357 return PTR_ERR(block_cb);
8359 flow_block_cb_add(block_cb, f);
8360 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8362 case FLOW_BLOCK_UNBIND:
8363 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8367 block_cb = flow_block_cb_lookup(f->block,
8368 ice_indr_setup_block_cb,
8373 flow_indr_block_cb_remove(block_cb, f);
8375 list_del(&block_cb->driver_list);
8384 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
8385 void *cb_priv, enum tc_setup_type type, void *type_data,
8387 void (*cleanup)(struct flow_block_cb *block_cb))
8390 case TC_SETUP_BLOCK:
8391 return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
8400 * ice_open - Called when a network interface becomes active
8401 * @netdev: network interface device structure
8403 * The open entry point is called when a network interface is made
8404 * active by the system (IFF_UP). At this point all resources needed
8405 * for transmit and receive operations are allocated, the interrupt
8406 * handler is registered with the OS, the netdev watchdog is enabled,
8407 * and the stack is notified that the interface is ready.
8409 * Returns 0 on success, negative value on failure
8411 int ice_open(struct net_device *netdev)
8413 struct ice_netdev_priv *np = netdev_priv(netdev);
8414 struct ice_pf *pf = np->vsi->back;
8416 if (ice_is_reset_in_progress(pf->state)) {
8417 netdev_err(netdev, "can't open net device while reset is in progress");
8421 return ice_open_internal(netdev);
8425 * ice_open_internal - Called when a network interface becomes active
8426 * @netdev: network interface device structure
8428 * Internal ice_open implementation. Should not be used directly except for ice_open and reset
8431 * Returns 0 on success, negative value on failure
8433 int ice_open_internal(struct net_device *netdev)
8435 struct ice_netdev_priv *np = netdev_priv(netdev);
8436 struct ice_vsi *vsi = np->vsi;
8437 struct ice_pf *pf = vsi->back;
8438 struct ice_port_info *pi;
8441 if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
8442 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
8446 netif_carrier_off(netdev);
8448 pi = vsi->port_info;
8449 err = ice_update_link_info(pi);
8451 netdev_err(netdev, "Failed to get link info, error %d\n", err);
8455 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
8457 /* Set PHY if there is media, otherwise, turn off PHY */
8458 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
8459 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8460 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
8461 err = ice_init_phy_user_cfg(pi);
8463 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
8469 err = ice_configure_phy(vsi);
8471 netdev_err(netdev, "Failed to set physical link up, error %d\n",
8476 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
8477 ice_set_link(vsi, false);
8480 err = ice_vsi_open(vsi);
8482 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
8483 vsi->vsi_num, vsi->vsw->sw_id);
8485 /* Update existing tunnels information */
8486 udp_tunnel_get_rx_info(netdev);
8492 * ice_stop - Disables a network interface
8493 * @netdev: network interface device structure
8495 * The stop entry point is called when an interface is de-activated by the OS,
8496 * and the netdevice enters the DOWN state. The hardware is still under the
8497 * driver's control, but the netdev interface is disabled.
8499 * Returns success only - not allowed to fail
8501 int ice_stop(struct net_device *netdev)
8503 struct ice_netdev_priv *np = netdev_priv(netdev);
8504 struct ice_vsi *vsi = np->vsi;
8505 struct ice_pf *pf = vsi->back;
8507 if (ice_is_reset_in_progress(pf->state)) {
8508 netdev_err(netdev, "can't stop net device while reset is in progress");
8518 * ice_features_check - Validate encapsulated packet conforms to limits
8520 * @netdev: This port's netdev
8521 * @features: Offload features that the stack believes apply
8523 static netdev_features_t
8524 ice_features_check(struct sk_buff *skb,
8525 struct net_device __always_unused *netdev,
8526 netdev_features_t features)
8530 /* No point in doing any of this if neither checksum nor GSO are
8531 * being requested for this frame. We can rule out both by just
8532 * checking for CHECKSUM_PARTIAL
8534 if (skb->ip_summed != CHECKSUM_PARTIAL)
8537 /* We cannot support GSO if the MSS is going to be less than
8538 * 64 bytes. If it is then we need to drop support for GSO.
8540 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
8541 features &= ~NETIF_F_GSO_MASK;
8543 len = skb_network_header(skb) - skb->data;
8544 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
8545 goto out_rm_features;
8547 len = skb_transport_header(skb) - skb_network_header(skb);
8548 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8549 goto out_rm_features;
8551 if (skb->encapsulation) {
8552 len = skb_inner_network_header(skb) - skb_transport_header(skb);
8553 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
8554 goto out_rm_features;
8556 len = skb_inner_transport_header(skb) -
8557 skb_inner_network_header(skb);
8558 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
8559 goto out_rm_features;
8564 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
8567 static const struct net_device_ops ice_netdev_safe_mode_ops = {
8568 .ndo_open = ice_open,
8569 .ndo_stop = ice_stop,
8570 .ndo_start_xmit = ice_start_xmit,
8571 .ndo_set_mac_address = ice_set_mac_address,
8572 .ndo_validate_addr = eth_validate_addr,
8573 .ndo_change_mtu = ice_change_mtu,
8574 .ndo_get_stats64 = ice_get_stats64,
8575 .ndo_tx_timeout = ice_tx_timeout,
8576 .ndo_bpf = ice_xdp_safe_mode,
8579 static const struct net_device_ops ice_netdev_ops = {
8580 .ndo_open = ice_open,
8581 .ndo_stop = ice_stop,
8582 .ndo_start_xmit = ice_start_xmit,
8583 .ndo_select_queue = ice_select_queue,
8584 .ndo_features_check = ice_features_check,
8585 .ndo_set_rx_mode = ice_set_rx_mode,
8586 .ndo_set_mac_address = ice_set_mac_address,
8587 .ndo_validate_addr = eth_validate_addr,
8588 .ndo_change_mtu = ice_change_mtu,
8589 .ndo_get_stats64 = ice_get_stats64,
8590 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
8591 .ndo_eth_ioctl = ice_eth_ioctl,
8592 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
8593 .ndo_set_vf_mac = ice_set_vf_mac,
8594 .ndo_get_vf_config = ice_get_vf_cfg,
8595 .ndo_set_vf_trust = ice_set_vf_trust,
8596 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
8597 .ndo_set_vf_link_state = ice_set_vf_link_state,
8598 .ndo_get_vf_stats = ice_get_vf_stats,
8599 .ndo_set_vf_rate = ice_set_vf_bw,
8600 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
8601 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
8602 .ndo_setup_tc = ice_setup_tc,
8603 .ndo_set_features = ice_set_features,
8604 .ndo_bridge_getlink = ice_bridge_getlink,
8605 .ndo_bridge_setlink = ice_bridge_setlink,
8606 .ndo_fdb_add = ice_fdb_add,
8607 .ndo_fdb_del = ice_fdb_del,
8608 #ifdef CONFIG_RFS_ACCEL
8609 .ndo_rx_flow_steer = ice_rx_flow_steer,
8611 .ndo_tx_timeout = ice_tx_timeout,
8613 .ndo_xdp_xmit = ice_xdp_xmit,
8614 .ndo_xsk_wakeup = ice_xsk_wakeup,