// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
#include <net/xdp_sock_drv.h>

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"

static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

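/* Illustrative note (not in the original source): "debug" is an ordinary
 * module parameter, so it can be set at load time, e.g. "modprobe ice
 * debug=16" for all netif message levels. Per the MODULE_PARM_DESC above,
 * when CONFIG_DYNAMIC_DEBUG is disabled a value of the form 0x8XXXXXXX is
 * instead interpreted as a hardware debug_mask.
 */
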
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);

/**
 * ice_hw_to_dev - Get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 *
 * Used to access the device pointer from compilation units which can't easily
 * include the definition of struct ice_pf without leading to circular header
 * dependencies.
 */
struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}

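/* Illustrative note (not in the original source): struct ice_hw is embedded
 * by value in struct ice_pf, so container_of() recovers the enclosing PF by
 * subtracting offsetof(struct ice_pf, hw) from the hw pointer. Callers such
 * as dev_dbg(ice_hw_to_dev(pi->hw), ...) later in this file can then log
 * against the PCI device without ever seeing the ice_pf definition.
 */
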
static struct workqueue_struct *ice_wq;
struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

static int ice_rebuild_channels(struct ice_pf *pf);
static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb));

bool netif_is_ice(const struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

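/* Worked example (illustrative, not in the original source): on a ring with
 * count = 8, head (next_to_clean) = 6 and tail (next_to_use) = 2, the tail
 * has wrapped past the end of the ring, so the pending count is
 * tail + count - head = 2 + 8 - 6 = 4 descriptors still awaiting cleanup.
 */
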
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	ice_for_each_txq(vsi, i) {
		struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
		struct ice_ring_stats *ring_stats;

		if (!tx_ring)
			continue;
		if (ice_ring_ch_enabled(tx_ring))
			continue;

		ring_stats = tx_ring->ring_stats;
		if (!ring_stats)
			continue;

		if (tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = ring_stats->stats.pkts & INT_MAX;
			if (ring_stats->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			ring_stats->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

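/* Illustrative note (not in the original source): prev_pkt implements a
 * two-pass hang detector. The first pass snapshots the packet count (or
 * stores -1 when nothing is pending); the next pass fires a software
 * interrupt only if the ring still has pending work and the count has not
 * moved. Masking with INT_MAX keeps the counter comparable to the signed
 * prev_pkt while reserving negative values as the "no pending work"
 * sentinel.
 */
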
/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_fltr_add_mac_list
 * to add the MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by
 * ice_fltr_remove_mac_list to delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

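/* Illustrative sketch (not in the original source): the kernel's address
 * list sync machinery drives the two callbacks above. A call such as
 *
 *	__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
 *		      ice_add_mac_to_unsync_list);
 *
 * walks the netdev's unicast list and invokes the first callback for each
 * newly added address and the second for each removed one, so only the
 * temporary lists are built here; the hardware is touched later, in
 * ice_vsi_sync_fltr() below, where exactly this call appears.
 */
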
/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}

/**
 * ice_set_promisc - Enable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
						       promisc_m);
	} else {
		status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
						  promisc_m, 0);
	}
	if (status && status != -EEXIST)
		return status;

	netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return 0;
}

/**
 * ice_clear_promisc - Disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 */
static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
	int status;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (ice_vsi_has_non_zero_vlans(vsi)) {
		promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
		status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
							 promisc_m);
	} else {
		status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
						    promisc_m, 0);
	}

	netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
		   vsi->vsi_num, promisc_m);
	return status;
}

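/* Illustrative note (not in the original source): both helpers widen the
 * caller's mask with ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX whenever the
 * VSI carries non-zero VLANs, so promiscuous reception/transmission is then
 * programmed per configured VLAN via the *_vlan_vsi_promisc() variants
 * rather than as a single VSI-wide rule.
 */
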
/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 changed_flags = 0;
	int err;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (err) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (err == -ENOMEM)
			goto out;
	}

	/* Add MAC addresses in the sync list */
	err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (err && err != -EEXIST) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			goto out;
		}
	}
	err = 0;
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
			if (err) {
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
				err = ice_set_dflt_vsi(vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				err = 0;
				vlan_ops->dis_rx_filtering(vsi);

				/* promiscuous mode implies allmulticast so
				 * that VSIs that are in promiscuous mode are
				 * subscribed to multicast packets coming to
				 * the port
				 */
				err = ice_set_promisc(vsi,
						      ICE_MCAST_PROMISC_BITS);
				if (err)
					goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(vsi)) {
				err = ice_clear_dflt_vsi(vsi);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}

			/* disable allmulti here, but only if allmulti is not
			 * still enabled for the netdev
			 */
			if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
				err = ice_clear_promisc(vsi,
							ICE_MCAST_PROMISC_BITS);
				if (err) {
					netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
						   err, vsi->vsi_num);
				}
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}
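
/* Illustrative summary (not in the original source) of the error paths
 * above: "out" re-arms the UMAC/MMAC changed bits so the MAC-list sync is
 * retried on the next service-task pass, while "out_promisc" re-arms only
 * ICE_VSI_PROMISC_CHANGED so just the promiscuous configuration is
 * revisited; both funnel through "exit", which drops ICE_CFG_BUSY.
 */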

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_clear_sw_switch_recipes - clear switch recipes
 * @pf: board private structure
 *
 * Mark switch recipes as not created in sw structures. There are cases where
 * rules (especially advanced rules) need to be restored, either re-read from
 * hardware or added again, for example after a reset. The 'recp_created' flag
 * prevents that and needs to be cleared upfront.
 */
static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
{
	struct ice_sw_recipe *recp;
	u8 i;

	recp = pf->hw.switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		recp[i].recp_created = false;
}

/**
 * ice_prepare_for_reset - prep for reset
 * @pf: board private structure
 * @reset_type: reset type requested
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_dis(vf);
	mutex_unlock(&pf->vfs.table_lock);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
			ice_clear_sw_switch_recipes(pf);
	}

	/* release ADQ specific HW and SW resources */
	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		goto skip;

	/* to be on safe side, reset orig_rss_size so that normal flow
	 * of deciding rss_size can take precedence
	 */
	vsi->orig_rss_size = 0;

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
		if (reset_type == ICE_RESET_PFR) {
			vsi->old_ena_tc = vsi->all_enatc;
			vsi->old_numtc = vsi->all_numtc;
		} else {
			ice_remove_q_channels(vsi, true);

			/* for other reset type, do not support channel rebuild
			 * hence reset needed info
			 */
			vsi->old_ena_tc = 0;
			vsi->all_enatc = 0;
			vsi->old_numtc = 0;
			vsi->all_numtc = 0;
			vsi->req_txq = 0;
			vsi->req_rxq = 0;
			clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
			memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
		}
	}
skip:

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_prepare_for_reset(pf);

	if (ice_is_feature_supported(pf, ICE_F_GNSS))
		ice_gnss_exit(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called.
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
		dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
		reset_type = ICE_RESET_CORER;
	}

	ice_prepare_for_reset(pf, reset_type);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
	 * for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it, as indicated by ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set, so prepare for the reset now), then poll for reset done,
	 * rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf, reset_type);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state)) {
		reset_type = ICE_RESET_PFR;
		if (pf->lag && pf->lag->bonded) {
			dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
			reset_type = ICE_RESET_CORER;
		}
	}
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel(R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
			netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
		else
			netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;
	int status;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message, and the driver should continue on with rebuild/re-enable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen);
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

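/* Worked example (illustrative, not in the original source) of the TLV
 * header packing used above: an LLDP type/length word keeps the TLV type in
 * the upper 7 bits and the body length in the lower 9 bits, so
 * (ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_IEEE_ETS_TLV_LEN packs
 * the organizationally-specific type (127) with the ETS body length, and
 * FIELD_GET(ICE_LLDP_TLV_LEN_M, typelen) recovers that length so
 * "offset += len + 2" advances past the body plus the 2-byte header.
 */
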
/**
 * ice_check_phy_fw_load - check if PHY FW load failed
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check if external PHY FW load failed and print an error message if it did
 */
static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
{
	if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
		clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
		return;
	}

	if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
		dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
		set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
	}
}

/**
 * ice_check_module_power - check module power level
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_check_link_cfg_err - check if link configuration failed
 * @pf: pointer to the PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * print if any link configuration failure happens due to the value in the
 * link_cfg_err parameter in the link info structure
 */
static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
{
	ice_check_module_power(pf, link_cfg_err);
	ice_check_phy_fw_load(pf, link_cfg_err);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int status;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
			pi->lport, status,
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	ice_ptp_link_change(pf, pf->hw.pf_id, link_up);

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
		       ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

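/* Illustrative note (not in the original source): the mask is inverted
 * because, in the firmware's event mask, a set bit suppresses that event.
 * Inverting the OR of UPDOWN, MEDIA_NA, MODULE_QUAL_FAIL and
 * PHY_FW_LOAD_FAIL therefore masks everything except those four events,
 * which are the ones the driver wants delivered. The trailing
 * ice_aq_get_link_info(pi, true, ...) call both refreshes the link info and
 * (via its second, enable-link-status-events argument) arms reporting.
 */
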
/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

/**
 * ice_get_fwlog_data - copy the FW log data from ARQ event
 * @pf: PF that the FW log event is associated with
 * @event: event structure containing FW log data
 */
static void
ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_fwlog_data *fwlog;
	struct ice_hw *hw = &pf->hw;

	fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

	memset(fwlog->data, 0, PAGE_SIZE);
	fwlog->data_size = le16_to_cpu(event->desc.datalen);

	memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
	ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

	if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
		/* the rings are full so bump the head to create room */
		ice_fwlog_ring_increment(&hw->fwlog_ring.head,
					 hw->fwlog_ring.size);
	}
}

/**
 * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: intermediate helper storage and identifier for waiting
 * @opcode: the opcode to wait for
 *
 * Prepares to wait for a specific AdminQ completion event on the ARQ for
 * a given PF. The actual wait is done by a call to ice_aq_wait_for_event().
 *
 * The calls are separated to allow the caller to register for the event
 * before sending the command, which mitigates a race between registering
 * and the firmware responding.
 *
 * To obtain only the descriptor contents, pass a task->event with a null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			   u16 opcode)
{
	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);
}

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @task: ptr prepared by ice_aq_prep_for_event()
 * @timeout: how long to wait, in jiffies
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
			  unsigned long timeout)
{
	enum ice_aq_task_state *state = &task->state;
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long start = jiffies;
	long ret;
	int err;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
					       *state != ICE_AQ_TASK_WAITING,
					       timeout);
	switch (*state) {
	case ICE_AQ_TASK_NOT_PREPARED:
		WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
		err = -EINVAL;
		break;
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", *state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		task->opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);

	return err;
}

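/* Illustrative usage sketch (not in the original source; the opcode and the
 * command-sending step are placeholders for whatever AdminQ command the
 * caller issues):
 *
 *	struct ice_aq_task task = {};
 *	int err;
 *
 *	ice_aq_prep_for_event(pf, &task, opcode);
 *	// ... send the AdminQ command that produces the 'opcode' event ...
 *	err = ice_aq_wait_for_event(pf, &task, HZ);
 *
 * Registering before sending means a firmware response that arrives between
 * the send and the wait is still matched by ice_aq_check_events() below.
 */
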
1365 | /** | |
1366 | * ice_aq_check_events - Check if any thread is waiting for an AdminQ event | |
1367 | * @pf: pointer to the PF private structure | |
1368 | * @opcode: the opcode of the event | |
1369 | * @event: the event to check | |
1370 | * | |
1371 | * Loops over the current list of pending threads waiting for an AdminQ event. | |
1372 | * For each matching task, copy the contents of the event into the task | |
1373 | * structure and wake up the thread. | |
1374 | * | |
1375 | * If multiple threads wait for the same opcode, they will all be woken up. | |
1376 | * | |
1377 | * Note that event->msg_buf will only be duplicated if the event has a buffer | |
1378 | * with enough space already allocated. Otherwise, only the descriptor and | |
1379 | * message length will be copied. | |
1382 | */ | |
1383 | static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, | |
1384 | struct ice_rq_event_info *event) | |
1385 | { | |
e1e8a142 | 1386 | struct ice_rq_event_info *task_ev; |
d69ea414 JK |
1387 | struct ice_aq_task *task; |
1388 | bool found = false; | |
1389 | ||
1390 | spin_lock_bh(&pf->aq_wait_lock); | |
1391 | hlist_for_each_entry(task, &pf->aq_wait_list, entry) { | |
fb9840c4 PK |
1392 | if (task->state != ICE_AQ_TASK_WAITING) |
1393 | continue; | |
1394 | if (task->opcode != opcode) | |
d69ea414 JK |
1395 | continue; |
1396 | ||
b214b98a | 1397 | task_ev = &task->event; |
e1e8a142 PK |
1398 | memcpy(&task_ev->desc, &event->desc, sizeof(event->desc)); |
1399 | task_ev->msg_len = event->msg_len; | |
d69ea414 JK |
1400 | |
1401 | /* Only copy the data buffer if a destination was set */ | |
e1e8a142 PK |
1402 | if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) { |
1403 | memcpy(task_ev->msg_buf, event->msg_buf, | |
d69ea414 | 1404 | event->buf_len); |
e1e8a142 | 1405 | task_ev->buf_len = event->buf_len; |
d69ea414 JK |
1406 | } |
1407 | ||
1408 | task->state = ICE_AQ_TASK_COMPLETE; | |
1409 | found = true; | |
1410 | } | |
1411 | spin_unlock_bh(&pf->aq_wait_lock); | |
1412 | ||
1413 | if (found) | |
1414 | wake_up(&pf->aq_wait_queue); | |
1415 | } | |
1416 | ||
1417 | /** | |
1418 | * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks | |
1419 | * @pf: the PF private structure | |
1420 | * | |
1421 | * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. | |
1422 | * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. | |
1423 | */ | |
1424 | static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) | |
1425 | { | |
1426 | struct ice_aq_task *task; | |
1427 | ||
1428 | spin_lock_bh(&pf->aq_wait_lock); | |
1429 | hlist_for_each_entry(task, &pf->aq_wait_list, entry) | |
1430 | task->state = ICE_AQ_TASK_CANCELED; | |
1431 | spin_unlock_bh(&pf->aq_wait_lock); | |
1432 | ||
1433 | wake_up(&pf->aq_wait_queue); | |
1434 | } | |
1435 | ||
afc24d65 JK |
1436 | #define ICE_MBX_OVERFLOW_WATERMARK 64 |
1437 | ||
940b61af AV |
1438 | /** |
1439 | * __ice_clean_ctrlq - helper function to clean controlq rings | |
1440 | * @pf: pointer to the PF private structure |
1441 | * @q_type: specific Control queue type | |
1442 | */ | |
1443 | static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |
1444 | { | |
4015d11e | 1445 | struct device *dev = ice_pf_to_dev(pf); |
940b61af AV |
1446 | struct ice_rq_event_info event; |
1447 | struct ice_hw *hw = &pf->hw; | |
1448 | struct ice_ctl_q_info *cq; | |
1449 | u16 pending, i = 0; | |
1450 | const char *qtype; | |
1451 | u32 oldval, val; | |
1452 | ||
0b28b702 | 1453 | /* Do not clean control queue if/when PF reset fails */ |
7e408e07 | 1454 | if (test_bit(ICE_RESET_FAILED, pf->state)) |
0b28b702 AV |
1455 | return 0; |
1456 | ||
940b61af AV |
1457 | switch (q_type) { |
1458 | case ICE_CTL_Q_ADMIN: | |
1459 | cq = &hw->adminq; | |
1460 | qtype = "Admin"; | |
1461 | break; | |
8f5ee3c4 JK |
1462 | case ICE_CTL_Q_SB: |
1463 | cq = &hw->sbq; | |
1464 | qtype = "Sideband"; | |
1465 | break; | |
75d2b253 AV |
1466 | case ICE_CTL_Q_MAILBOX: |
1467 | cq = &hw->mailboxq; | |
1468 | qtype = "Mailbox"; | |
0891c896 VS |
1469 | /* we are going to try to detect a malicious VF, so set the |
1470 | * state to begin detection | |
1471 | */ | |
1472 | hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; | |
75d2b253 | 1473 | break; |
940b61af | 1474 | default: |
4015d11e | 1475 | dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); |
940b61af AV |
1476 | return 0; |
1477 | } | |
1478 | ||
1479 | /* check for error indications - the PF_xx_AxQLEN register layouts for |
1480 | * FW/MBX/SB are identical, so just use the defines for PF_FW_AxQLEN. |
1481 | */ | |
1482 | val = rd32(hw, cq->rq.len); | |
1483 | if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | | |
1484 | PF_FW_ARQLEN_ARQCRIT_M)) { | |
1485 | oldval = val; | |
1486 | if (val & PF_FW_ARQLEN_ARQVFE_M) | |
4015d11e BC |
1487 | dev_dbg(dev, "%s Receive Queue VF Error detected\n", |
1488 | qtype); | |
940b61af | 1489 | if (val & PF_FW_ARQLEN_ARQOVFL_M) { |
19cce2c6 | 1490 | dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", |
940b61af AV |
1491 | qtype); |
1492 | } | |
1493 | if (val & PF_FW_ARQLEN_ARQCRIT_M) | |
19cce2c6 | 1494 | dev_dbg(dev, "%s Receive Queue Critical Error detected\n", |
940b61af AV |
1495 | qtype); |
1496 | val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | | |
1497 | PF_FW_ARQLEN_ARQCRIT_M); | |
1498 | if (oldval != val) | |
1499 | wr32(hw, cq->rq.len, val); | |
1500 | } | |
1501 | ||
1502 | val = rd32(hw, cq->sq.len); | |
1503 | if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | | |
1504 | PF_FW_ATQLEN_ATQCRIT_M)) { | |
1505 | oldval = val; | |
1506 | if (val & PF_FW_ATQLEN_ATQVFE_M) | |
19cce2c6 AV |
1507 | dev_dbg(dev, "%s Send Queue VF Error detected\n", |
1508 | qtype); | |
940b61af | 1509 | if (val & PF_FW_ATQLEN_ATQOVFL_M) { |
4015d11e | 1510 | dev_dbg(dev, "%s Send Queue Overflow Error detected\n", |
940b61af AV |
1511 | qtype); |
1512 | } | |
1513 | if (val & PF_FW_ATQLEN_ATQCRIT_M) | |
4015d11e | 1514 | dev_dbg(dev, "%s Send Queue Critical Error detected\n", |
940b61af AV |
1515 | qtype); |
1516 | val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | | |
1517 | PF_FW_ATQLEN_ATQCRIT_M); | |
1518 | if (oldval != val) | |
1519 | wr32(hw, cq->sq.len, val); | |
1520 | } | |
1521 | ||
1522 | event.buf_len = cq->rq_buf_size; | |
9efe35d0 | 1523 | event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); |
940b61af AV |
1524 | if (!event.msg_buf) |
1525 | return 0; | |
1526 | ||
1527 | do { | |
afc24d65 | 1528 | struct ice_mbx_data data = {}; |
0b28b702 | 1529 | u16 opcode; |
5518ac2a | 1530 | int ret; |
940b61af AV |
1531 | |
1532 | ret = ice_clean_rq_elem(hw, cq, &event, &pending); | |
d54699e2 | 1533 | if (ret == -EALREADY) |
940b61af AV |
1534 | break; |
1535 | if (ret) { | |
5f87ec48 TN |
1536 | dev_err(dev, "%s Receive Queue event error %d\n", qtype, |
1537 | ret); | |
940b61af AV |
1538 | break; |
1539 | } | |
0b28b702 AV |
1540 | |
1541 | opcode = le16_to_cpu(event.desc.opcode); | |
1542 | ||
d69ea414 JK |
1543 | /* Notify any thread that might be waiting for this event */ |
1544 | ice_aq_check_events(pf, opcode, &event); | |
1545 | ||
0b28b702 | 1546 | switch (opcode) { |
250c3b3e | 1547 | case ice_aqc_opc_get_link_status: |
c2a23e00 | 1548 | if (ice_handle_link_event(pf, &event)) |
4015d11e | 1549 | dev_err(dev, "Could not handle link event\n"); |
250c3b3e | 1550 | break; |
2309ae38 BC |
1551 | case ice_aqc_opc_event_lan_overflow: |
1552 | ice_vf_lan_overflow_event(pf, &event); | |
1553 | break; | |
1071a835 | 1554 | case ice_mbx_opc_send_msg_to_pf: |
afc24d65 JK |
1555 | data.num_msg_proc = i; |
1556 | data.num_pending_arq = pending; | |
1557 | data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries; | |
1558 | data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; | |
1559 | ||
be96815c | 1560 | ice_vc_process_vf_msg(pf, &event, &data); |
1071a835 | 1561 | break; |
9d3535e7 PSJ |
1562 | case ice_aqc_opc_fw_logs_event: |
1563 | ice_get_fwlog_data(pf, &event); | |
1564 | break; | |
00cc3f1b AV |
1565 | case ice_aqc_opc_lldp_set_mib_change: |
1566 | ice_dcb_process_lldp_set_mib_change(pf, &event); | |
1567 | break; | |
0b28b702 | 1568 | default: |
19cce2c6 | 1569 | dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", |
0b28b702 AV |
1570 | qtype, opcode); |
1571 | break; | |
1572 | } | |
940b61af AV |
1573 | } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); |
1574 | ||
9efe35d0 | 1575 | kfree(event.msg_buf); |
940b61af AV |
1576 | |
1577 | return pending && (i == ICE_DFLT_IRQ_WORK); | |
1578 | } | |
1579 | ||
3d6b640e AV |
1580 | /** |
1581 | * ice_ctrlq_pending - check if there is a difference between ntc and ntu | |
1582 | * @hw: pointer to hardware info | |
1583 | * @cq: control queue information | |
1584 | * | |
1585 | * returns true if there are pending messages in a queue, false if there aren't | |
1586 | */ | |
1587 | static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |
1588 | { | |
1589 | u16 ntu; | |
1590 | ||
1591 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | |
1592 | return cq->rq.next_to_clean != ntu; | |
1593 | } | |
1594 | ||
940b61af AV |
1595 | /** |
1596 | * ice_clean_adminq_subtask - clean the AdminQ rings | |
1597 | * @pf: board private structure | |
1598 | */ | |
1599 | static void ice_clean_adminq_subtask(struct ice_pf *pf) | |
1600 | { | |
1601 | struct ice_hw *hw = &pf->hw; | |
940b61af | 1602 | |
7e408e07 | 1603 | if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) |
940b61af AV |
1604 | return; |
1605 | ||
1606 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) | |
1607 | return; | |
1608 | ||
7e408e07 | 1609 | clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); |
940b61af | 1610 | |
3d6b640e AV |
1611 | /* There might be a situation where new messages arrive to a control |
1612 | * queue between processing the last message and clearing the | |
1613 | * EVENT_PENDING bit. So before exiting, check queue head again (using | |
1614 | * ice_ctrlq_pending) and process new messages if any. | |
1615 | */ | |
1616 | if (ice_ctrlq_pending(hw, &hw->adminq)) | |
1617 | __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); | |
940b61af AV |
1618 | |
1619 | ice_flush(hw); | |
1620 | } | |
1621 | ||
75d2b253 AV |
1622 | /** |
1623 | * ice_clean_mailboxq_subtask - clean the MailboxQ rings | |
1624 | * @pf: board private structure | |
1625 | */ | |
1626 | static void ice_clean_mailboxq_subtask(struct ice_pf *pf) | |
1627 | { | |
1628 | struct ice_hw *hw = &pf->hw; | |
1629 | ||
7e408e07 | 1630 | if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) |
75d2b253 AV |
1631 | return; |
1632 | ||
1633 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) | |
1634 | return; | |
1635 | ||
7e408e07 | 1636 | clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
75d2b253 AV |
1637 | |
1638 | if (ice_ctrlq_pending(hw, &hw->mailboxq)) | |
1639 | __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); | |
1640 | ||
1641 | ice_flush(hw); | |
1642 | } | |
1643 | ||
8f5ee3c4 JK |
1644 | /** |
1645 | * ice_clean_sbq_subtask - clean the Sideband Queue rings | |
1646 | * @pf: board private structure | |
1647 | */ | |
1648 | static void ice_clean_sbq_subtask(struct ice_pf *pf) | |
1649 | { | |
1650 | struct ice_hw *hw = &pf->hw; | |
1651 | ||
1652 | /* Nothing to do here if sideband queue is not supported */ | |
1653 | if (!ice_is_sbq_supported(hw)) { | |
1654 | clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); | |
1655 | return; | |
1656 | } | |
1657 | ||
1658 | if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) | |
1659 | return; | |
1660 | ||
1661 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) | |
1662 | return; | |
1663 | ||
1664 | clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); | |
1665 | ||
1666 | if (ice_ctrlq_pending(hw, &hw->sbq)) | |
1667 | __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); | |
1668 | ||
1669 | ice_flush(hw); | |
1670 | } | |
1671 | ||
940b61af AV |
1672 | /** |
1673 | * ice_service_task_schedule - schedule the service task to wake up | |
1674 | * @pf: board private structure | |
1675 | * | |
1676 | * If not already scheduled, this puts the task into the work queue. | |
1677 | */ | |
28bf2672 | 1678 | void ice_service_task_schedule(struct ice_pf *pf) |
940b61af | 1679 | { |
7e408e07 AV |
1680 | if (!test_bit(ICE_SERVICE_DIS, pf->state) && |
1681 | !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && | |
1682 | !test_bit(ICE_NEEDS_RESTART, pf->state)) | |
940b61af AV |
1683 | queue_work(ice_wq, &pf->serv_task); |
1684 | } | |
1685 | ||
1686 | /** | |
1687 | * ice_service_task_complete - finish up the service task | |
1688 | * @pf: board private structure | |
1689 | */ | |
1690 | static void ice_service_task_complete(struct ice_pf *pf) | |
1691 | { | |
7e408e07 | 1692 | WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); |
940b61af AV |
1693 | |
1694 | /* force memory (pf->state) to sync before next service task */ | |
1695 | smp_mb__before_atomic(); | |
7e408e07 | 1696 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
940b61af AV |
1697 | } |
1698 | ||
8d81fa55 AA |
1699 | /** |
1700 | * ice_service_task_stop - stop service task and cancel works | |
1701 | * @pf: board private structure | |
769c500d | 1702 | * |
7e408e07 | 1703 | * Return 0 if the ICE_SERVICE_DIS bit was not already set, |
769c500d | 1704 | * 1 otherwise. |
8d81fa55 | 1705 | */ |
769c500d | 1706 | static int ice_service_task_stop(struct ice_pf *pf) |
8d81fa55 | 1707 | { |
769c500d AA |
1708 | int ret; |
1709 | ||
7e408e07 | 1710 | ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); |
8d81fa55 AA |
1711 | |
1712 | if (pf->serv_tmr.function) | |
1713 | del_timer_sync(&pf->serv_tmr); | |
1714 | if (pf->serv_task.func) | |
1715 | cancel_work_sync(&pf->serv_task); | |
1716 | ||
7e408e07 | 1717 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
769c500d | 1718 | return ret; |
8d81fa55 AA |
1719 | } |
1720 | ||
5995b6d0 BC |
1721 | /** |
1722 | * ice_service_task_restart - restart service task and schedule works | |
1723 | * @pf: board private structure | |
1724 | * | |
1725 | * This function is needed for the suspend and resume flows (e.g. WoL scenario) |
1726 | */ | |
1727 | static void ice_service_task_restart(struct ice_pf *pf) | |
1728 | { | |
7e408e07 | 1729 | clear_bit(ICE_SERVICE_DIS, pf->state); |
5995b6d0 BC |
1730 | ice_service_task_schedule(pf); |
1731 | } | |
1732 | ||
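/*
 * Minimal suspend/resume sketch using the two helpers above; the function
 * names are placeholders, not the driver's actual PM hooks.
 */
static int example_suspend(struct ice_pf *pf)
{
	/* no new service work can be scheduled once this returns */
	ice_service_task_stop(pf);
	return 0;
}

static int example_resume(struct ice_pf *pf)
{
	/* clear ICE_SERVICE_DIS and kick the service task again */
	ice_service_task_restart(pf);
	return 0;
}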
940b61af AV |
1733 | /** |
1734 | * ice_service_timer - timer callback to schedule service task | |
1735 | * @t: pointer to timer_list | |
1736 | */ | |
1737 | static void ice_service_timer(struct timer_list *t) | |
1738 | { | |
1739 | struct ice_pf *pf = from_timer(pf, t, serv_tmr); | |
1740 | ||
1741 | mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); | |
1742 | ice_service_task_schedule(pf); | |
1743 | } | |
1744 | ||
b3969fd7 SM |
1745 | /** |
1746 | * ice_handle_mdd_event - handle malicious driver detect event | |
1747 | * @pf: pointer to the PF structure | |
1748 | * | |
9d5c5a52 PG |
1749 | * Called from the service task. The OICR interrupt handler indicates an MDD |
1750 | * event. VF MDD logging is guarded by net_ratelimit. Additional PF and VF log |
1751 | * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events |
1752 | * disable the queue, the PF can be configured to reset the VF using the |
1753 | * ethtool private flag mdd-auto-reset-vf. |
b3969fd7 SM |
1754 | */ |
1755 | static void ice_handle_mdd_event(struct ice_pf *pf) | |
1756 | { | |
4015d11e | 1757 | struct device *dev = ice_pf_to_dev(pf); |
b3969fd7 | 1758 | struct ice_hw *hw = &pf->hw; |
c4c2c7db JK |
1759 | struct ice_vf *vf; |
1760 | unsigned int bkt; | |
b3969fd7 SM |
1761 | u32 reg; |
1762 | ||
7e408e07 | 1763 | if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { |
9d5c5a52 PG |
1764 | /* Since the VF MDD event logging is rate limited, check if |
1765 | * there are pending MDD events. | |
1766 | */ | |
1767 | ice_print_vfs_mdd_events(pf); | |
b3969fd7 | 1768 | return; |
9d5c5a52 | 1769 | } |
b3969fd7 | 1770 | |
9d5c5a52 | 1771 | /* find what triggered an MDD event */ |
b3969fd7 SM |
1772 | reg = rd32(hw, GL_MDET_TX_PQM); |
1773 | if (reg & GL_MDET_TX_PQM_VALID_M) { | |
5a259f8e JB |
1774 | u8 pf_num = FIELD_GET(GL_MDET_TX_PQM_PF_NUM_M, reg); |
1775 | u16 vf_num = FIELD_GET(GL_MDET_TX_PQM_VF_NUM_M, reg); | |
1776 | u8 event = FIELD_GET(GL_MDET_TX_PQM_MAL_TYPE_M, reg); | |
1777 | u16 queue = FIELD_GET(GL_MDET_TX_PQM_QNUM_M, reg); | |
b3969fd7 SM |
1778 | |
1779 | if (netif_msg_tx_err(pf)) | |
4015d11e | 1780 | dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
b3969fd7 SM |
1781 | event, queue, pf_num, vf_num); |
1782 | wr32(hw, GL_MDET_TX_PQM, 0xffffffff); | |
b3969fd7 SM |
1783 | } |
1784 | ||
ba1124f5 | 1785 | reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw)); |
b3969fd7 | 1786 | if (reg & GL_MDET_TX_TCLAN_VALID_M) { |
5a259f8e JB |
1787 | u8 pf_num = FIELD_GET(GL_MDET_TX_TCLAN_PF_NUM_M, reg); |
1788 | u16 vf_num = FIELD_GET(GL_MDET_TX_TCLAN_VF_NUM_M, reg); | |
1789 | u8 event = FIELD_GET(GL_MDET_TX_TCLAN_MAL_TYPE_M, reg); | |
1790 | u16 queue = FIELD_GET(GL_MDET_TX_TCLAN_QNUM_M, reg); | |
b3969fd7 | 1791 | |
1d8bd992 | 1792 | if (netif_msg_tx_err(pf)) |
4015d11e | 1793 | dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
b3969fd7 | 1794 | event, queue, pf_num, vf_num); |
ba1124f5 | 1795 | wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); |
b3969fd7 SM |
1796 | } |
1797 | ||
1798 | reg = rd32(hw, GL_MDET_RX); | |
1799 | if (reg & GL_MDET_RX_VALID_M) { | |
5a259f8e JB |
1800 | u8 pf_num = FIELD_GET(GL_MDET_RX_PF_NUM_M, reg); |
1801 | u16 vf_num = FIELD_GET(GL_MDET_RX_VF_NUM_M, reg); | |
1802 | u8 event = FIELD_GET(GL_MDET_RX_MAL_TYPE_M, reg); | |
1803 | u16 queue = FIELD_GET(GL_MDET_RX_QNUM_M, reg); | |
b3969fd7 SM |
1804 | |
1805 | if (netif_msg_rx_err(pf)) | |
4015d11e | 1806 | dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", |
b3969fd7 SM |
1807 | event, queue, pf_num, vf_num); |
1808 | wr32(hw, GL_MDET_RX, 0xffffffff); | |
b3969fd7 SM |
1809 | } |
1810 | ||
9d5c5a52 PG |
1811 | /* check to see if this PF caused an MDD event */ |
1812 | reg = rd32(hw, PF_MDET_TX_PQM); | |
1813 | if (reg & PF_MDET_TX_PQM_VALID_M) { | |
1814 | wr32(hw, PF_MDET_TX_PQM, 0xFFFF); | |
1815 | if (netif_msg_tx_err(pf)) | |
1816 | dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); | |
1817 | } | |
b3969fd7 | 1818 | |
ba1124f5 | 1819 | reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw)); |
9d5c5a52 | 1820 | if (reg & PF_MDET_TX_TCLAN_VALID_M) { |
ba1124f5 | 1821 | wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff); |
9d5c5a52 PG |
1822 | if (netif_msg_tx_err(pf)) |
1823 | dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); | |
1824 | } | |
b3969fd7 | 1825 | |
9d5c5a52 PG |
1826 | reg = rd32(hw, PF_MDET_RX); |
1827 | if (reg & PF_MDET_RX_VALID_M) { | |
1828 | wr32(hw, PF_MDET_RX, 0xFFFF); | |
1829 | if (netif_msg_rx_err(pf)) | |
1830 | dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); | |
b3969fd7 SM |
1831 | } |
1832 | ||
9d5c5a52 PG |
1833 | /* Check to see if one of the VFs caused an MDD event, and then |
1834 | * increment counters and set print pending | |
1835 | */ | |
3d5985a1 | 1836 | mutex_lock(&pf->vfs.table_lock); |
c4c2c7db JK |
1837 | ice_for_each_vf(pf, bkt, vf) { |
1838 | reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); | |
7c4bc1f5 | 1839 | if (reg & VP_MDET_TX_PQM_VALID_M) { |
c4c2c7db | 1840 | wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); |
9d5c5a52 | 1841 | vf->mdd_tx_events.count++; |
7e408e07 | 1842 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1843 | if (netif_msg_tx_err(pf)) |
1844 | dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", | |
c4c2c7db | 1845 | vf->vf_id); |
7c4bc1f5 AV |
1846 | } |
1847 | ||
c4c2c7db | 1848 | reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); |
7c4bc1f5 | 1849 | if (reg & VP_MDET_TX_TCLAN_VALID_M) { |
c4c2c7db | 1850 | wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF); |
9d5c5a52 | 1851 | vf->mdd_tx_events.count++; |
7e408e07 | 1852 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1853 | if (netif_msg_tx_err(pf)) |
1854 | dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", | |
c4c2c7db | 1855 | vf->vf_id); |
7c4bc1f5 AV |
1856 | } |
1857 | ||
c4c2c7db | 1858 | reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); |
7c4bc1f5 | 1859 | if (reg & VP_MDET_TX_TDPU_VALID_M) { |
c4c2c7db | 1860 | wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF); |
9d5c5a52 | 1861 | vf->mdd_tx_events.count++; |
7e408e07 | 1862 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1863 | if (netif_msg_tx_err(pf)) |
1864 | dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", | |
c4c2c7db | 1865 | vf->vf_id); |
7c4bc1f5 AV |
1866 | } |
1867 | ||
c4c2c7db | 1868 | reg = rd32(hw, VP_MDET_RX(vf->vf_id)); |
7c4bc1f5 | 1869 | if (reg & VP_MDET_RX_VALID_M) { |
c4c2c7db | 1870 | wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF); |
9d5c5a52 | 1871 | vf->mdd_rx_events.count++; |
7e408e07 | 1872 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1873 | if (netif_msg_rx_err(pf)) |
1874 | dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", | |
c4c2c7db | 1875 | vf->vf_id); |
9d5c5a52 PG |
1876 | |
1877 | /* Since the queue is disabled on VF Rx MDD events, the | |
1878 | * PF can be configured to reset the VF through ethtool | |
1879 | * private flag mdd-auto-reset-vf. | |
1880 | */ | |
7438a3b0 PG |
1881 | if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { |
1882 | /* VF MDD event counters will be cleared by | |
1883 | * reset, so print the event prior to reset. | |
1884 | */ | |
1885 | ice_print_vf_rx_mdd_event(vf); | |
f5f085c0 | 1886 | ice_reset_vf(vf, ICE_VF_RESET_LOCK); |
7438a3b0 | 1887 | } |
7c4bc1f5 AV |
1888 | } |
1889 | } | |
3d5985a1 | 1890 | mutex_unlock(&pf->vfs.table_lock); |
9d5c5a52 PG |
1891 | |
1892 | ice_print_vfs_mdd_events(pf); | |
b3969fd7 SM |
1893 | } |
1894 | ||
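/* The auto-reset behavior referenced above is toggled from userspace with
 * the ethtool private flag, e.g. (interface name is a placeholder):
 *
 *	ethtool --set-priv-flags eth0 mdd-auto-reset-vf on
 */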
6d599946 TN |
1895 | /** |
1896 | * ice_force_phys_link_state - Force the physical link state | |
1897 | * @vsi: VSI to force the physical link state to up/down | |
1898 | * @link_up: true/false indicates to set the physical link to up/down | |
1899 | * | |
1900 | * Force the physical link state by getting the current PHY capabilities from | |
1901 | * hardware and setting the PHY config based on the determined capabilities. |
1902 | * If the link changes, a link event will be triggered because the Enable |
1903 | * Automatic Link Update and LESM Enable bits are both set in the PHY config. |
1904 | * | |
1905 | * Returns 0 on success, negative on failure | |
1906 | */ | |
1907 | static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) | |
1908 | { | |
1909 | struct ice_aqc_get_phy_caps_data *pcaps; | |
1910 | struct ice_aqc_set_phy_cfg_data *cfg; | |
1911 | struct ice_port_info *pi; | |
1912 | struct device *dev; | |
1913 | int retcode; | |
1914 | ||
1915 | if (!vsi || !vsi->port_info || !vsi->back) | |
1916 | return -EINVAL; | |
1917 | if (vsi->type != ICE_VSI_PF) | |
1918 | return 0; | |
1919 | ||
9a946843 | 1920 | dev = ice_pf_to_dev(vsi->back); |
6d599946 TN |
1921 | |
1922 | pi = vsi->port_info; | |
1923 | ||
9efe35d0 | 1924 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
6d599946 TN |
1925 | if (!pcaps) |
1926 | return -ENOMEM; | |
1927 | ||
d6730a87 | 1928 | retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
6d599946 TN |
1929 | NULL); |
1930 | if (retcode) { | |
19cce2c6 | 1931 | dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", |
6d599946 TN |
1932 | vsi->vsi_num, retcode); |
1933 | retcode = -EIO; | |
1934 | goto out; | |
1935 | } | |
1936 | ||
1937 | /* No change in link */ | |
1938 | if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && | |
1939 | link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) | |
1940 | goto out; | |
1941 | ||
1a3571b5 PG |
1942 | /* Use the current user PHY configuration. The current user PHY |
1943 | * configuration is initialized during probe from PHY capabilities | |
1944 | * software mode, and updated on set PHY configuration. | |
1945 | */ | |
1946 | cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); | |
6d599946 TN |
1947 | if (!cfg) { |
1948 | retcode = -ENOMEM; | |
1949 | goto out; | |
1950 | } | |
1951 | ||
1a3571b5 | 1952 | cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; |
6d599946 TN |
1953 | if (link_up) |
1954 | cfg->caps |= ICE_AQ_PHY_ENA_LINK; | |
1955 | else | |
1956 | cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; | |
1957 | ||
1a3571b5 | 1958 | retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); |
6d599946 TN |
1959 | if (retcode) { |
1960 | dev_err(dev, "Failed to set phy config, VSI %d error %d\n", | |
1961 | vsi->vsi_num, retcode); | |
1962 | retcode = -EIO; | |
1963 | } | |
1964 | ||
9efe35d0 | 1965 | kfree(cfg); |
6d599946 | 1966 | out: |
9efe35d0 | 1967 | kfree(pcaps); |
6d599946 TN |
1968 | return retcode; |
1969 | } | |
1970 | ||
1971 | /** | |
1a3571b5 PG |
1972 | * ice_init_nvm_phy_type - Initialize the NVM PHY type |
1973 | * @pi: port info structure | |
1974 | * | |
ea78ce4d | 1975 | * Initialize nvm_phy_type_[low|high] for link lenient mode support |
1a3571b5 PG |
1976 | */ |
1977 | static int ice_init_nvm_phy_type(struct ice_port_info *pi) | |
1978 | { | |
1979 | struct ice_aqc_get_phy_caps_data *pcaps; | |
1980 | struct ice_pf *pf = pi->hw->back; | |
2ccc1c1c | 1981 | int err; |
1a3571b5 PG |
1982 | |
1983 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); | |
1984 | if (!pcaps) | |
1985 | return -ENOMEM; | |
1986 | ||
2ccc1c1c TN |
1987 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, |
1988 | pcaps, NULL); | |
1a3571b5 | 1989 | |
2ccc1c1c | 1990 | if (err) { |
1a3571b5 | 1991 | dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
1a3571b5 PG |
1992 | goto out; |
1993 | } | |
1994 | ||
1995 | pf->nvm_phy_type_hi = pcaps->phy_type_high; | |
1996 | pf->nvm_phy_type_lo = pcaps->phy_type_low; | |
1997 | ||
1998 | out: | |
1999 | kfree(pcaps); | |
2000 | return err; | |
2001 | } | |
2002 | ||
ea78ce4d PG |
2003 | /** |
2004 | * ice_init_link_dflt_override - Initialize link default override | |
2005 | * @pi: port info structure | |
b4e813dd BA |
2006 | * |
2007 | * Initialize link default override and PHY total port shutdown during probe | |
ea78ce4d PG |
2008 | */ |
2009 | static void ice_init_link_dflt_override(struct ice_port_info *pi) | |
2010 | { | |
2011 | struct ice_link_default_override_tlv *ldo; | |
2012 | struct ice_pf *pf = pi->hw->back; | |
2013 | ||
2014 | ldo = &pf->link_dflt_override; | |
b4e813dd BA |
2015 | if (ice_get_link_default_override(ldo, pi)) |
2016 | return; | |
2017 | ||
2018 | if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) | |
2019 | return; | |
2020 | ||
2021 | /* Enable Total Port Shutdown (override/replace link-down-on-close | |
2022 | * ethtool private flag) for ports with Port Disable bit set. | |
2023 | */ | |
2024 | set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); | |
2025 | set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); | |
ea78ce4d PG |
2026 | } |
2027 | ||
2028 | /** | |
2029 | * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings | |
2030 | * @pi: port info structure | |
2031 | * | |
0a02944f | 2032 | * If default override is enabled, initialize the user PHY cfg speed and FEC |
ea78ce4d PG |
2033 | * settings using the default override mask from the NVM. |
2034 | * | |
2035 | * The PHY should only be configured with the default override settings the | |
7e408e07 | 2036 | * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state |
ea78ce4d PG |
2037 | * is used to indicate that the user PHY cfg default override is initialized |
2038 | * and the PHY has not been configured with the default override settings. The | |
2039 | * state is set here, and cleared in ice_configure_phy the first time the PHY is | |
2040 | * configured. | |
0a02944f AV |
2041 | * |
2042 | * This function should be called only if the FW doesn't support default | |
2043 | * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. | |
ea78ce4d PG |
2044 | */ |
2045 | static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) | |
2046 | { | |
2047 | struct ice_link_default_override_tlv *ldo; | |
2048 | struct ice_aqc_set_phy_cfg_data *cfg; | |
2049 | struct ice_phy_info *phy = &pi->phy; | |
2050 | struct ice_pf *pf = pi->hw->back; | |
2051 | ||
2052 | ldo = &pf->link_dflt_override; | |
2053 | ||
2054 | /* If link default override is enabled, use it to mask NVM PHY capabilities |
2055 | * for speed and FEC default configuration. | |
2056 | */ | |
2057 | cfg = &phy->curr_user_phy_cfg; | |
2058 | ||
2059 | if (ldo->phy_type_low || ldo->phy_type_high) { | |
2060 | cfg->phy_type_low = pf->nvm_phy_type_lo & | |
2061 | cpu_to_le64(ldo->phy_type_low); | |
2062 | cfg->phy_type_high = pf->nvm_phy_type_hi & | |
2063 | cpu_to_le64(ldo->phy_type_high); | |
2064 | } | |
2065 | cfg->link_fec_opt = ldo->fec_options; | |
2066 | phy->curr_user_fec_req = ICE_FEC_AUTO; | |
2067 | ||
7e408e07 | 2068 | set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); |
ea78ce4d PG |
2069 | } |
2070 | ||
1a3571b5 PG |
2071 | /** |
2072 | * ice_init_phy_user_cfg - Initialize the PHY user configuration | |
2073 | * @pi: port info structure | |
2074 | * | |
2075 | * Initialize the current user PHY configuration, speed, FEC, and FC requested | |
2076 | * mode to default. The PHY defaults come from the Get PHY Capabilities |
2077 | * (topology with media) response, so call this when media is first available. |
2078 | * An error is returned if called when media is not available. The PHY |
2079 | * initialization completed state is set here. |
2080 | * |
2081 | * These values are used later when setting the PHY configuration, and the |
2082 | * user PHY configuration is updated whenever the PHY configuration is set. |
2083 | * Returns 0 on success, negative on failure |
2084 | */ | |
2085 | static int ice_init_phy_user_cfg(struct ice_port_info *pi) | |
2086 | { | |
2087 | struct ice_aqc_get_phy_caps_data *pcaps; | |
2088 | struct ice_phy_info *phy = &pi->phy; | |
2089 | struct ice_pf *pf = pi->hw->back; | |
2ccc1c1c | 2090 | int err; |
1a3571b5 PG |
2091 | |
2092 | if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) | |
2093 | return -EIO; | |
2094 | ||
1a3571b5 PG |
2095 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
2096 | if (!pcaps) | |
2097 | return -ENOMEM; | |
2098 | ||
0a02944f | 2099 | if (ice_fw_supports_report_dflt_cfg(pi->hw)) |
2ccc1c1c TN |
2100 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, |
2101 | pcaps, NULL); | |
0a02944f | 2102 | else |
2ccc1c1c TN |
2103 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, |
2104 | pcaps, NULL); | |
2105 | if (err) { | |
1a3571b5 | 2106 | dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); |
1a3571b5 PG |
2107 | goto err_out; |
2108 | } | |
2109 | ||
ea78ce4d PG |
2110 | ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); |
2111 | ||
2112 | /* check if lenient mode is supported and enabled */ | |
dc6aaa13 | 2113 | if (ice_fw_supports_link_override(pi->hw) && |
ea78ce4d PG |
2114 | !(pcaps->module_compliance_enforcement & |
2115 | ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { | |
2116 | set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); | |
2117 | ||
0a02944f AV |
2118 | /* if the FW supports default PHY configuration mode, then the driver |
2119 | * does not have to apply link override settings. If not, | |
2120 | * initialize user PHY configuration with link override values | |
ea78ce4d | 2121 | */ |
0a02944f AV |
2122 | if (!ice_fw_supports_report_dflt_cfg(pi->hw) && |
2123 | (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { | |
ea78ce4d PG |
2124 | ice_init_phy_cfg_dflt_override(pi); |
2125 | goto out; | |
2126 | } | |
2127 | } | |
2128 | ||
0a02944f AV |
2129 | /* if link default override is not enabled, set user flow control and |
2130 | * FEC settings based on what get_phy_caps returned | |
ea78ce4d | 2131 | */ |
1a3571b5 PG |
2132 | phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, |
2133 | pcaps->link_fec_options); | |
2134 | phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); | |
2135 | ||
ea78ce4d | 2136 | out: |
1a3571b5 | 2137 | phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; |
7e408e07 | 2138 | set_bit(ICE_PHY_INIT_COMPLETE, pf->state); |
1a3571b5 PG |
2139 | err_out: |
2140 | kfree(pcaps); | |
2141 | return err; | |
2142 | } | |
2143 | ||
2144 | /** | |
2145 | * ice_configure_phy - configure PHY | |
2146 | * @vsi: VSI of PHY | |
2147 | * | |
2148 | * Set the PHY configuration. If the current PHY configuration is the same as | |
2149 | * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise | |
2150 | * configure the PHY based on the Get PHY Capabilities (topology with media). |
2151 | */ | |
2152 | static int ice_configure_phy(struct ice_vsi *vsi) | |
2153 | { | |
2154 | struct device *dev = ice_pf_to_dev(vsi->back); | |
efc1eddb | 2155 | struct ice_port_info *pi = vsi->port_info; |
1a3571b5 PG |
2156 | struct ice_aqc_get_phy_caps_data *pcaps; |
2157 | struct ice_aqc_set_phy_cfg_data *cfg; | |
efc1eddb AV |
2158 | struct ice_phy_info *phy = &pi->phy; |
2159 | struct ice_pf *pf = vsi->back; | |
2ccc1c1c | 2160 | int err; |
1a3571b5 | 2161 | |
1a3571b5 | 2162 | /* Ensure we have media as we cannot configure a medialess port */ |
efc1eddb | 2163 | if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
6a8d8bb5 | 2164 | return -ENOMEDIUM; |
1a3571b5 PG |
2165 | |
2166 | ice_print_topo_conflict(vsi); | |
2167 | ||
4fc5fbee AV |
2168 | if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && |
2169 | phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) | |
1a3571b5 PG |
2170 | return -EPERM; |
2171 | ||
efc1eddb | 2172 | if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) |
1a3571b5 PG |
2173 | return ice_force_phys_link_state(vsi, true); |
2174 | ||
2175 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); | |
2176 | if (!pcaps) | |
2177 | return -ENOMEM; | |
2178 | ||
2179 | /* Get current PHY config */ | |
2ccc1c1c TN |
2180 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
2181 | NULL); | |
2182 | if (err) { | |
5f87ec48 | 2183 | dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n", |
2ccc1c1c | 2184 | vsi->vsi_num, err); |
1a3571b5 PG |
2185 | goto done; |
2186 | } | |
2187 | ||
2188 | /* If PHY enable link is configured and configuration has not changed, | |
2189 | * there's nothing to do | |
2190 | */ | |
2191 | if (pcaps->caps & ICE_AQC_PHY_EN_LINK && | |
efc1eddb | 2192 | ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) |
1a3571b5 PG |
2193 | goto done; |
2194 | ||
2195 | /* Use PHY topology as baseline for configuration */ | |
2196 | memset(pcaps, 0, sizeof(*pcaps)); | |
0a02944f | 2197 | if (ice_fw_supports_report_dflt_cfg(pi->hw)) |
2ccc1c1c TN |
2198 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, |
2199 | pcaps, NULL); | |
0a02944f | 2200 | else |
2ccc1c1c TN |
2201 | err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, |
2202 | pcaps, NULL); | |
2203 | if (err) { | |
5f87ec48 | 2204 | dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n", |
2ccc1c1c | 2205 | vsi->vsi_num, err); |
1a3571b5 PG |
2206 | goto done; |
2207 | } | |
2208 | ||
2209 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | |
2210 | if (!cfg) { | |
2211 | err = -ENOMEM; | |
2212 | goto done; | |
2213 | } | |
2214 | ||
ea78ce4d | 2215 | ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); |
1a3571b5 PG |
2216 | |
2217 | /* Speed - If default override pending, use curr_user_phy_cfg set in | |
2218 | * ice_init_phy_cfg_dflt_override. |
2219 | */ | |
7e408e07 | 2220 | if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, |
ea78ce4d | 2221 | vsi->back->state)) { |
efc1eddb AV |
2222 | cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; |
2223 | cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; | |
ea78ce4d PG |
2224 | } else { |
2225 | u64 phy_low = 0, phy_high = 0; | |
2226 | ||
2227 | ice_update_phy_type(&phy_low, &phy_high, | |
2228 | pi->phy.curr_user_speed_req); | |
2229 | cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); | |
2230 | cfg->phy_type_high = pcaps->phy_type_high & | |
2231 | cpu_to_le64(phy_high); | |
2232 | } | |
1a3571b5 PG |
2233 | |
2234 | /* Can't provide what was requested; use PHY capabilities */ | |
2235 | if (!cfg->phy_type_low && !cfg->phy_type_high) { | |
2236 | cfg->phy_type_low = pcaps->phy_type_low; | |
2237 | cfg->phy_type_high = pcaps->phy_type_high; | |
2238 | } | |
2239 | ||
2240 | /* FEC */ | |
efc1eddb | 2241 | ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); |
1a3571b5 PG |
2242 | |
2243 | /* Can't provide what was requested; use PHY capabilities */ | |
2244 | if (cfg->link_fec_opt != | |
2245 | (cfg->link_fec_opt & pcaps->link_fec_options)) { | |
2246 | cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; | |
2247 | cfg->link_fec_opt = pcaps->link_fec_options; | |
2248 | } | |
2249 | ||
2250 | /* Flow Control - always supported; no need to check against | |
2251 | * capabilities | |
2252 | */ | |
efc1eddb | 2253 | ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); |
1a3571b5 PG |
2254 | |
2255 | /* Enable link and link update */ | |
2256 | cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; | |
2257 | ||
2ccc1c1c | 2258 | err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); |
c1484691 | 2259 | if (err) |
5f87ec48 | 2260 | dev_err(dev, "Failed to set phy config, VSI %d error %d\n", |
2ccc1c1c | 2261 | vsi->vsi_num, err); |
1a3571b5 PG |
2262 | |
2263 | kfree(cfg); | |
2264 | done: | |
2265 | kfree(pcaps); | |
2266 | return err; | |
2267 | } | |
2268 | ||
2269 | /** | |
2270 | * ice_check_media_subtask - Check for media | |
6d599946 | 2271 | * @pf: pointer to PF struct |
1a3571b5 PG |
2272 | * |
2273 | * If media is available, then initialize the PHY user configuration if it |
2274 | * has not been done yet, and configure the PHY if the interface is up. |
6d599946 TN |
2275 | */ |
2276 | static void ice_check_media_subtask(struct ice_pf *pf) | |
2277 | { | |
2278 | struct ice_port_info *pi; | |
2279 | struct ice_vsi *vsi; | |
2280 | int err; | |
2281 | ||
1a3571b5 PG |
2282 | /* No need to check for media if it's already present */ |
2283 | if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) | |
6d599946 TN |
2284 | return; |
2285 | ||
1a3571b5 PG |
2286 | vsi = ice_get_main_vsi(pf); |
2287 | if (!vsi) | |
6d599946 TN |
2288 | return; |
2289 | ||
2290 | /* Refresh link info and check if media is present */ | |
2291 | pi = vsi->port_info; | |
2292 | err = ice_update_link_info(pi); | |
2293 | if (err) | |
2294 | return; | |
2295 | ||
99d40752 | 2296 | ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); |
c77849f5 | 2297 | |
6d599946 | 2298 | if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
7e408e07 | 2299 | if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) |
1a3571b5 PG |
2300 | ice_init_phy_user_cfg(pi); |
2301 | ||
2302 | /* PHY settings are reset on media insertion, reconfigure | |
2303 | * PHY to preserve settings. | |
2304 | */ | |
e97fb1ae | 2305 | if (test_bit(ICE_VSI_DOWN, vsi->state) && |
1a3571b5 | 2306 | test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
6d599946 | 2307 | return; |
1a3571b5 PG |
2308 | |
2309 | err = ice_configure_phy(vsi); | |
2310 | if (!err) | |
2311 | clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); | |
6d599946 TN |
2312 | |
2313 | /* A Link Status Event will be generated; the event handler | |
2314 | * will complete bringing the interface up | |
2315 | */ | |
2316 | } | |
2317 | } | |
2318 | ||
940b61af AV |
2319 | /** |
2320 | * ice_service_task - manage and run subtasks | |
2321 | * @work: pointer to work_struct contained by the PF struct | |
2322 | */ | |
2323 | static void ice_service_task(struct work_struct *work) | |
2324 | { | |
2325 | struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); | |
2326 | unsigned long start_time = jiffies; | |
2327 | ||
2328 | /* subtasks */ | |
0b28b702 AV |
2329 | |
2330 | /* process reset requests first */ | |
2331 | ice_reset_subtask(pf); | |
2332 | ||
0f9d5027 | 2333 | /* bail if a reset/recovery cycle is pending or rebuild failed */ |
5df7e45d | 2334 | if (ice_is_reset_in_progress(pf->state) || |
7e408e07 AV |
2335 | test_bit(ICE_SUSPENDED, pf->state) || |
2336 | test_bit(ICE_NEEDS_RESTART, pf->state)) { | |
0b28b702 AV |
2337 | ice_service_task_complete(pf); |
2338 | return; | |
2339 | } | |
2340 | ||
32d53c0a AL |
2341 | if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { |
2342 | struct iidc_event *event; | |
2343 | ||
2344 | event = kzalloc(sizeof(*event), GFP_KERNEL); | |
2345 | if (event) { | |
2346 | set_bit(IIDC_EVENT_CRIT_ERR, event->type); | |
2347 | /* report the entire OICR value to AUX driver */ | |
2348 | swap(event->reg, pf->oicr_err_reg); | |
2349 | ice_send_event_to_aux(pf, event); | |
2350 | kfree(event); | |
2351 | } | |
2352 | } | |
2353 | ||
248401cb DE |
2354 | /* unplug the aux dev per request; if an unplug request came in |
2355 | * while processing a plug request, this will handle it |
2356 | */ | |
2357 | if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) | |
2358 | ice_unplug_aux_dev(pf); | |
5dbbbd01 | 2359 | |
248401cb DE |
2360 | /* Plug aux device per request */ |
2361 | if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) | |
2362 | ice_plug_aux_dev(pf); | |
5cb1ebdb | 2363 | |
97b01291 DE |
2364 | if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { |
2365 | struct iidc_event *event; | |
2366 | ||
2367 | event = kzalloc(sizeof(*event), GFP_KERNEL); | |
2368 | if (event) { | |
2369 | set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); | |
2370 | ice_send_event_to_aux(pf, event); | |
2371 | kfree(event); | |
2372 | } | |
2373 | } | |
2374 | ||
462acf6a | 2375 | ice_clean_adminq_subtask(pf); |
6d599946 | 2376 | ice_check_media_subtask(pf); |
b3969fd7 | 2377 | ice_check_for_hang_subtask(pf); |
e94d4478 | 2378 | ice_sync_fltr_subtask(pf); |
b3969fd7 | 2379 | ice_handle_mdd_event(pf); |
fcea6f3d | 2380 | ice_watchdog_subtask(pf); |
462acf6a TN |
2381 | |
2382 | if (ice_is_safe_mode(pf)) { | |
2383 | ice_service_task_complete(pf); | |
2384 | return; | |
2385 | } | |
2386 | ||
2387 | ice_process_vflr_event(pf); | |
75d2b253 | 2388 | ice_clean_mailboxq_subtask(pf); |
8f5ee3c4 | 2389 | ice_clean_sbq_subtask(pf); |
28bf2672 | 2390 | ice_sync_arfs_fltrs(pf); |
d6218317 | 2391 | ice_flush_fdir_ctx(pf); |
7e408e07 AV |
2392 | |
2393 | /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ | |
940b61af AV |
2394 | ice_service_task_complete(pf); |
2395 | ||
2396 | /* If the tasks have taken longer than one service timer period | |
2397 | * or there is more work to be done, reset the service timer to | |
2398 | * schedule the service task now. | |
2399 | */ | |
2400 | if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || | |
7e408e07 AV |
2401 | test_bit(ICE_MDD_EVENT_PENDING, pf->state) || |
2402 | test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || | |
2403 | test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || | |
2404 | test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || | |
8f5ee3c4 | 2405 | test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || |
7e408e07 | 2406 | test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) |
940b61af AV |
2407 | mod_timer(&pf->serv_tmr, jiffies); |
2408 | } | |
2409 | ||
f31e4b6f AV |
2410 | /** |
2411 | * ice_set_ctrlq_len - helper function to set controlq length | |
f9867df6 | 2412 | * @hw: pointer to the HW instance |
f31e4b6f AV |
2413 | */ |
2414 | static void ice_set_ctrlq_len(struct ice_hw *hw) | |
2415 | { | |
2416 | hw->adminq.num_rq_entries = ICE_AQ_LEN; | |
2417 | hw->adminq.num_sq_entries = ICE_AQ_LEN; | |
2418 | hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; | |
2419 | hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; | |
c8a1071d | 2420 | hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; |
11836214 | 2421 | hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; |
75d2b253 AV |
2422 | hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
2423 | hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; | |
8f5ee3c4 JK |
2424 | hw->sbq.num_rq_entries = ICE_SBQ_LEN; |
2425 | hw->sbq.num_sq_entries = ICE_SBQ_LEN; | |
2426 | hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; | |
2427 | hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; | |
f31e4b6f AV |
2428 | } |
2429 | ||
87324e74 HT |
2430 | /** |
2431 | * ice_schedule_reset - schedule a reset | |
2432 | * @pf: board private structure | |
2433 | * @reset: reset being requested | |
2434 | */ | |
2435 | int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) | |
2436 | { | |
2437 | struct device *dev = ice_pf_to_dev(pf); | |
2438 | ||
2439 | /* bail out if earlier reset has failed */ | |
7e408e07 | 2440 | if (test_bit(ICE_RESET_FAILED, pf->state)) { |
87324e74 HT |
2441 | dev_dbg(dev, "earlier reset has failed\n"); |
2442 | return -EIO; | |
2443 | } | |
2444 | /* bail if reset/recovery already in progress */ | |
2445 | if (ice_is_reset_in_progress(pf->state)) { | |
2446 | dev_dbg(dev, "Reset already in progress\n"); | |
2447 | return -EBUSY; | |
2448 | } | |
2449 | ||
2450 | switch (reset) { | |
2451 | case ICE_RESET_PFR: | |
7e408e07 | 2452 | set_bit(ICE_PFR_REQ, pf->state); |
87324e74 HT |
2453 | break; |
2454 | case ICE_RESET_CORER: | |
7e408e07 | 2455 | set_bit(ICE_CORER_REQ, pf->state); |
87324e74 HT |
2456 | break; |
2457 | case ICE_RESET_GLOBR: | |
7e408e07 | 2458 | set_bit(ICE_GLOBR_REQ, pf->state); |
87324e74 HT |
2459 | break; |
2460 | default: | |
2461 | return -EINVAL; | |
2462 | } | |
2463 | ||
2464 | ice_service_task_schedule(pf); | |
2465 | return 0; | |
2466 | } | |
2467 | ||
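/*
 * Minimal sketch of requesting a PF reset; the function name is a
 * placeholder. ice_schedule_reset() only sets the request bit; the reset
 * itself runs later from ice_reset_subtask() in the service task.
 */
static void example_request_pfr(struct ice_pf *pf)
{
	int err = ice_schedule_reset(pf, ICE_RESET_PFR);

	if (err)
		dev_dbg(ice_pf_to_dev(pf), "reset not scheduled, error %d\n", err);
}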
cdedef59 AV |
2468 | /** |
2469 | * ice_irq_affinity_notify - Callback for affinity changes | |
2470 | * @notify: context as to what irq was changed | |
2471 | * @mask: the new affinity mask | |
2472 | * | |
2473 | * This is a callback function used by the irq_set_affinity_notifier function | |
2474 | * so that we may register to receive changes to the irq affinity masks. | |
2475 | */ | |
c8b7abdd BA |
2476 | static void |
2477 | ice_irq_affinity_notify(struct irq_affinity_notify *notify, | |
2478 | const cpumask_t *mask) | |
cdedef59 AV |
2479 | { |
2480 | struct ice_q_vector *q_vector = | |
2481 | container_of(notify, struct ice_q_vector, affinity_notify); | |
2482 | ||
2483 | cpumask_copy(&q_vector->affinity_mask, mask); | |
2484 | } | |
2485 | ||
2486 | /** | |
2487 | * ice_irq_affinity_release - Callback for affinity notifier release | |
2488 | * @ref: internal core kernel usage | |
2489 | * | |
2490 | * This is a callback function used by the irq_set_affinity_notifier function | |
2491 | * to inform the current notification subscriber that they will no longer | |
2492 | * receive notifications. | |
2493 | */ | |
2494 | static void ice_irq_affinity_release(struct kref __always_unused *ref) {} | |
2495 | ||
cdedef59 AV |
2496 | /** |
2497 | * ice_vsi_ena_irq - Enable IRQ for the given VSI | |
2498 | * @vsi: the VSI being configured | |
2499 | */ | |
2500 | static int ice_vsi_ena_irq(struct ice_vsi *vsi) | |
2501 | { | |
ba880734 BC |
2502 | struct ice_hw *hw = &vsi->back->hw; |
2503 | int i; | |
cdedef59 | 2504 | |
ba880734 BC |
2505 | ice_for_each_q_vector(vsi, i) |
2506 | ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); | |
cdedef59 AV |
2507 | |
2508 | ice_flush(hw); | |
2509 | return 0; | |
2510 | } | |
2511 | ||
cdedef59 AV |
2512 | /** |
2513 | * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI | |
2514 | * @vsi: the VSI being configured | |
2515 | * @basename: name for the vector | |
2516 | */ | |
2517 | static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) | |
2518 | { | |
2519 | int q_vectors = vsi->num_q_vectors; | |
2520 | struct ice_pf *pf = vsi->back; | |
4015d11e | 2521 | struct device *dev; |
cdedef59 AV |
2522 | int rx_int_idx = 0; |
2523 | int tx_int_idx = 0; | |
2524 | int vector, err; | |
2525 | int irq_num; | |
2526 | ||
4015d11e | 2527 | dev = ice_pf_to_dev(pf); |
cdedef59 AV |
2528 | for (vector = 0; vector < q_vectors; vector++) { |
2529 | struct ice_q_vector *q_vector = vsi->q_vectors[vector]; | |
2530 | ||
4aad5335 | 2531 | irq_num = q_vector->irq.virq; |
cdedef59 | 2532 | |
e72bba21 | 2533 | if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { |
cdedef59 AV |
2534 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2535 | "%s-%s-%d", basename, "TxRx", rx_int_idx++); | |
2536 | tx_int_idx++; | |
e72bba21 | 2537 | } else if (q_vector->rx.rx_ring) { |
cdedef59 AV |
2538 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2539 | "%s-%s-%d", basename, "rx", rx_int_idx++); | |
e72bba21 | 2540 | } else if (q_vector->tx.tx_ring) { |
cdedef59 AV |
2541 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, |
2542 | "%s-%s-%d", basename, "tx", tx_int_idx++); | |
2543 | } else { | |
2544 | /* skip this unused q_vector */ | |
2545 | continue; | |
2546 | } | |
b03d519d | 2547 | if (vsi->type == ICE_VSI_CTRL && vsi->vf) |
da62c5ff QZ |
2548 | err = devm_request_irq(dev, irq_num, vsi->irq_handler, |
2549 | IRQF_SHARED, q_vector->name, | |
2550 | q_vector); | |
2551 | else | |
2552 | err = devm_request_irq(dev, irq_num, vsi->irq_handler, | |
2553 | 0, q_vector->name, q_vector); | |
cdedef59 | 2554 | if (err) { |
19cce2c6 AV |
2555 | netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", |
2556 | err); | |
cdedef59 AV |
2557 | goto free_q_irqs; |
2558 | } | |
2559 | ||
2560 | /* register for affinity change notifications */ | |
28bf2672 BC |
2561 | if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { |
2562 | struct irq_affinity_notify *affinity_notify; | |
2563 | ||
2564 | affinity_notify = &q_vector->affinity_notify; | |
2565 | affinity_notify->notify = ice_irq_affinity_notify; | |
2566 | affinity_notify->release = ice_irq_affinity_release; | |
2567 | irq_set_affinity_notifier(irq_num, affinity_notify); | |
2568 | } | |
cdedef59 AV |
2569 | |
2570 | /* assign the mask for this irq */ | |
2571 | irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); | |
2572 | } | |
2573 | ||
d7442f51 AL |
2574 | err = ice_set_cpu_rx_rmap(vsi); |
2575 | if (err) { | |
2576 | netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n", | |
2577 | vsi->vsi_num, ERR_PTR(err)); | |
2578 | goto free_q_irqs; | |
2579 | } | |
2580 | ||
cdedef59 AV |
2581 | vsi->irqs_ready = true; |
2582 | return 0; | |
2583 | ||
2584 | free_q_irqs: | |
4aad5335 PR |
2585 | while (vector--) { |
2586 | irq_num = vsi->q_vectors[vector]->irq.virq; | |
28bf2672 BC |
2587 | if (!IS_ENABLED(CONFIG_RFS_ACCEL)) |
2588 | irq_set_affinity_notifier(irq_num, NULL); | |
cdedef59 | 2589 | irq_set_affinity_hint(irq_num, NULL); |
4015d11e | 2590 | devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); |
cdedef59 AV |
2591 | } |
2592 | return err; | |
2593 | } | |
2594 | ||
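/*
 * Sketch of how a caller might build the vector basename; this assumes the
 * driver's ICE_INT_NAME_STR_LEN define and is not necessarily the exact
 * string built elsewhere in the driver. With basename "ice-eth0" the
 * vectors come out as "ice-eth0-TxRx-0", "ice-eth0-TxRx-1", and so on.
 */
static int example_req_irqs(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 KBUILD_MODNAME, vsi->netdev->name);
	return ice_vsi_req_irq_msix(vsi, int_name);
}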
efc2214b MF |
2595 | /** |
2596 | * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP | |
2597 | * @vsi: VSI to setup Tx rings used by XDP | |
2598 | * | |
2599 | * Return 0 on success and negative value on error | |
2600 | */ | |
2601 | static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) | |
2602 | { | |
9a946843 | 2603 | struct device *dev = ice_pf_to_dev(vsi->back); |
9610bd98 MF |
2604 | struct ice_tx_desc *tx_desc; |
2605 | int i, j; | |
efc2214b | 2606 | |
2faf63b6 | 2607 | ice_for_each_xdp_txq(vsi, i) { |
efc2214b | 2608 | u16 xdp_q_idx = vsi->alloc_txq + i; |
288ecf49 | 2609 | struct ice_ring_stats *ring_stats; |
e72bba21 | 2610 | struct ice_tx_ring *xdp_ring; |
efc2214b MF |
2611 | |
2612 | xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); | |
efc2214b MF |
2613 | if (!xdp_ring) |
2614 | goto free_xdp_rings; | |
2615 | ||
288ecf49 BM |
2616 | ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL); |
2617 | if (!ring_stats) { | |
2618 | ice_free_tx_ring(xdp_ring); | |
2619 | goto free_xdp_rings; | |
2620 | } | |
2621 | ||
2622 | xdp_ring->ring_stats = ring_stats; | |
efc2214b MF |
2623 | xdp_ring->q_index = xdp_q_idx; |
2624 | xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; | |
efc2214b MF |
2625 | xdp_ring->vsi = vsi; |
2626 | xdp_ring->netdev = NULL; | |
2627 | xdp_ring->dev = dev; | |
2628 | xdp_ring->count = vsi->num_tx_desc; | |
b1d95cc2 | 2629 | WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); |
efc2214b MF |
2630 | if (ice_setup_tx_ring(xdp_ring)) |
2631 | goto free_xdp_rings; | |
2632 | ice_set_ring_xdp(xdp_ring); | |
22bf877e | 2633 | spin_lock_init(&xdp_ring->tx_lock); |
9610bd98 MF |
2634 | for (j = 0; j < xdp_ring->count; j++) { |
2635 | tx_desc = ICE_TX_DESC(xdp_ring, j); | |
e19778e6 | 2636 | tx_desc->cmd_type_offset_bsz = 0; |
9610bd98 | 2637 | } |
efc2214b MF |
2638 | } |
2639 | ||
2640 | return 0; | |
2641 | ||
2642 | free_xdp_rings: | |
288ecf49 BM |
2643 | for (; i >= 0; i--) { |
2644 | if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) { | |
2645 | kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); | |
2646 | vsi->xdp_rings[i]->ring_stats = NULL; | |
efc2214b | 2647 | ice_free_tx_ring(vsi->xdp_rings[i]); |
288ecf49 BM |
2648 | } |
2649 | } | |
efc2214b MF |
2650 | return -ENOMEM; |
2651 | } | |
2652 | ||
2653 | /** | |
2654 | * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI | |
2655 | * @vsi: VSI to set the bpf prog on | |
2656 | * @prog: the bpf prog pointer | |
2657 | */ | |
2658 | static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) | |
2659 | { | |
2660 | struct bpf_prog *old_prog; | |
2661 | int i; | |
2662 | ||
2663 | old_prog = xchg(&vsi->xdp_prog, prog); | |
efc2214b MF |
2664 | ice_for_each_rxq(vsi, i) |
2665 | WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); | |
46974842 MF |
2666 | |
2667 | if (old_prog) | |
2668 | bpf_prog_put(old_prog); | |
efc2214b MF |
2669 | } |
2670 | ||
2671 | /** | |
2672 | * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP | |
2673 | * @vsi: VSI to bring up Tx rings used by XDP | |
2674 | * @prog: bpf program that will be assigned to VSI | |
2675 | * | |
2676 | * Return 0 on success and negative value on error | |
2677 | */ | |
2678 | int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) | |
2679 | { | |
2680 | u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; | |
2681 | int xdp_rings_rem = vsi->num_xdp_txq; | |
2682 | struct ice_pf *pf = vsi->back; | |
2683 | struct ice_qs_cfg xdp_qs_cfg = { | |
2684 | .qs_mutex = &pf->avail_q_mutex, | |
2685 | .pf_map = pf->avail_txqs, | |
2686 | .pf_map_size = pf->max_pf_txqs, | |
2687 | .q_count = vsi->num_xdp_txq, | |
2688 | .scatter_count = ICE_MAX_SCATTER_TXQS, | |
2689 | .vsi_map = vsi->txq_map, | |
2690 | .vsi_map_offset = vsi->alloc_txq, | |
2691 | .mapping_mode = ICE_VSI_MAP_CONTIG | |
2692 | }; | |
4015d11e | 2693 | struct device *dev; |
efc2214b | 2694 | int i, v_idx; |
5518ac2a | 2695 | int status; |
efc2214b | 2696 | |
4015d11e BC |
2697 | dev = ice_pf_to_dev(pf); |
2698 | vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, | |
efc2214b MF |
2699 | sizeof(*vsi->xdp_rings), GFP_KERNEL); |
2700 | if (!vsi->xdp_rings) | |
2701 | return -ENOMEM; | |
2702 | ||
2703 | vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; | |
2704 | if (__ice_vsi_get_qs(&xdp_qs_cfg)) | |
2705 | goto err_map_xdp; | |
2706 | ||
22bf877e MF |
2707 | if (static_key_enabled(&ice_xdp_locking_key)) |
2708 | netdev_warn(vsi->netdev, | |
2709 | "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n"); | |
2710 | ||
efc2214b MF |
2711 | if (ice_xdp_alloc_setup_rings(vsi)) |
2712 | goto clear_xdp_rings; | |
2713 | ||
2714 | /* follow the logic from ice_vsi_map_rings_to_vectors */ | |
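	/* Worked example: with num_xdp_txq = 8 and num_q_vectors = 3 the
	 * loop below hands out DIV_ROUND_UP(8, 3) = 3, DIV_ROUND_UP(5, 2) = 3
	 * and then 2 rings, i.e. rings 0-2, 3-5 and 6-7 per vector.
	 */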
2715 | ice_for_each_q_vector(vsi, v_idx) { | |
2716 | struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; | |
2717 | int xdp_rings_per_v, q_id, q_base; | |
2718 | ||
2719 | xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, | |
2720 | vsi->num_q_vectors - v_idx); | |
2721 | q_base = vsi->num_xdp_txq - xdp_rings_rem; | |
2722 | ||
2723 | for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { | |
e72bba21 | 2724 | struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; |
efc2214b MF |
2725 | |
2726 | xdp_ring->q_vector = q_vector; | |
e72bba21 MF |
2727 | xdp_ring->next = q_vector->tx.tx_ring; |
2728 | q_vector->tx.tx_ring = xdp_ring; | |
efc2214b MF |
2729 | } |
2730 | xdp_rings_rem -= xdp_rings_per_v; | |
2731 | } | |
2732 | ||
9ead7e74 MF |
2733 | ice_for_each_rxq(vsi, i) { |
2734 | if (static_key_enabled(&ice_xdp_locking_key)) { | |
2735 | vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; | |
2736 | } else { | |
2737 | struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; | |
2738 | struct ice_tx_ring *ring; | |
2739 | ||
2740 | ice_for_each_tx_ring(ring, q_vector->tx) { | |
2741 | if (ice_ring_is_xdp(ring)) { | |
2742 | vsi->rx_rings[i]->xdp_ring = ring; | |
2743 | break; | |
2744 | } | |
2745 | } | |
2746 | } | |
2747 | ice_tx_xsk_pool(vsi, i); | |
2748 | } | |
2749 | ||
efc2214b MF |
2750 | /* omit the scheduler update if in reset path; XDP queues will be |
2751 | * taken into account at the end of ice_vsi_rebuild, where | |
2752 | * ice_cfg_vsi_lan is being called | |
2753 | */ | |
2754 | if (ice_is_reset_in_progress(pf->state)) | |
2755 | return 0; | |
2756 | ||
2757 | /* tell the Tx scheduler that right now we have | |
2758 | * additional queues | |
2759 | */ | |
2760 | for (i = 0; i < vsi->tc_cfg.numtc; i++) | |
2761 | max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; | |
2762 | ||
2763 | status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, | |
2764 | max_txqs); | |
2765 | if (status) { | |
5f87ec48 TN |
2766 | dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n", |
2767 | status); | |
efc2214b MF |
2768 | goto clear_xdp_rings; |
2769 | } | |
f65ee535 MP |
2770 | |
2771 | /* assign the prog only when it's not already present on VSI; | |
2772 | * this flow is exercised by both the ethtool -L and ndo_bpf paths; | |
2773 | * VSI rebuild that happens under ethtool -L can expose us to | |
2774 | * the bpf_prog refcount issues as we would be swapping same | |
2775 | * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put | |
2776 | * on it as it would be treated as an 'old_prog'; for ndo_bpf | |
2777 | * this is not harmful as dev_xdp_install bumps the refcount | |
2778 | * before calling the op exposed by the driver; | |
2779 | */ | |
2780 | if (!ice_is_xdp_ena_vsi(vsi)) | |
2781 | ice_vsi_assign_bpf_prog(vsi, prog); | |
efc2214b MF |
2782 | |
2783 | return 0; | |
2784 | clear_xdp_rings: | |
2faf63b6 | 2785 | ice_for_each_xdp_txq(vsi, i) |
efc2214b MF |
2786 | if (vsi->xdp_rings[i]) { |
2787 | kfree_rcu(vsi->xdp_rings[i], rcu); | |
2788 | vsi->xdp_rings[i] = NULL; | |
2789 | } | |
2790 | ||
2791 | err_map_xdp: | |
2792 | mutex_lock(&pf->avail_q_mutex); | |
2faf63b6 | 2793 | ice_for_each_xdp_txq(vsi, i) { |
efc2214b MF |
2794 | clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
2795 | vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; | |
2796 | } | |
2797 | mutex_unlock(&pf->avail_q_mutex); | |
2798 | ||
4015d11e | 2799 | devm_kfree(dev, vsi->xdp_rings); |
efc2214b MF |
2800 | return -ENOMEM; |
2801 | } | |
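/* Illustrative userspace sketch (not driver code) of the mapping loop in
 * ice_prepare_xdp_rings() above: DIV_ROUND_UP() over the remaining rings
 * spreads num_xdp_txq rings across the q_vectors as evenly as possible.
 * The ring and vector counts below are made-up examples.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 10;		/* hypothetical XDP Tx ring count */
	int num_q_vectors = 4;		/* hypothetical vector count */
	int xdp_rings_rem = num_xdp_txq;

	for (int v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		int per_v = DIV_ROUND_UP(xdp_rings_rem,
					 num_q_vectors - v_idx);
		int q_base = num_xdp_txq - xdp_rings_rem;

		printf("vector %d gets rings %d..%d\n",
		       v_idx, q_base, q_base + per_v - 1);
		xdp_rings_rem -= per_v;
	}
	return 0;	/* prints a 3/3/2/2 split for 10 rings, 4 vectors */
}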
2802 | ||
2803 | /** | |
2804 | * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings | |
2805 | * @vsi: VSI to remove XDP rings | |
2806 | * | |
2807 | * Detach XDP rings from irq vectors, clean up the PF bitmap and free | |
2808 | * resources | |
2809 | */ | |
2810 | int ice_destroy_xdp_rings(struct ice_vsi *vsi) | |
2811 | { | |
2812 | u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; | |
2813 | struct ice_pf *pf = vsi->back; | |
2814 | int i, v_idx; | |
2815 | ||
2816 | /* q_vectors are freed in the reset path, so there's no point in detaching |
ac382a09 | 2817 | * rings; if the rebuild was triggered outside of reset, the bits |
efc2214b MF |
2818 | * in pf->state won't be set, so additionally check the first q_vector |
2819 | * against NULL | |
2820 | */ | |
2821 | if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) | |
2822 | goto free_qmap; | |
2823 | ||
2824 | ice_for_each_q_vector(vsi, v_idx) { | |
2825 | struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; | |
e72bba21 | 2826 | struct ice_tx_ring *ring; |
efc2214b | 2827 | |
e72bba21 | 2828 | ice_for_each_tx_ring(ring, q_vector->tx) |
efc2214b MF |
2829 | if (!ring->tx_buf || !ice_ring_is_xdp(ring)) |
2830 | break; | |
2831 | ||
2832 | /* restore the value of last node prior to XDP setup */ | |
e72bba21 | 2833 | q_vector->tx.tx_ring = ring; |
efc2214b MF |
2834 | } |
2835 | ||
2836 | free_qmap: | |
2837 | mutex_lock(&pf->avail_q_mutex); | |
2faf63b6 | 2838 | ice_for_each_xdp_txq(vsi, i) { |
efc2214b MF |
2839 | clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); |
2840 | vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; | |
2841 | } | |
2842 | mutex_unlock(&pf->avail_q_mutex); | |
2843 | ||
2faf63b6 | 2844 | ice_for_each_xdp_txq(vsi, i) |
efc2214b | 2845 | if (vsi->xdp_rings[i]) { |
f9124c68 MF |
2846 | if (vsi->xdp_rings[i]->desc) { |
2847 | synchronize_rcu(); | |
efc2214b | 2848 | ice_free_tx_ring(vsi->xdp_rings[i]); |
f9124c68 | 2849 | } |
288ecf49 BM |
2850 | kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu); |
2851 | vsi->xdp_rings[i]->ring_stats = NULL; | |
efc2214b MF |
2852 | kfree_rcu(vsi->xdp_rings[i], rcu); |
2853 | vsi->xdp_rings[i] = NULL; | |
2854 | } | |
2855 | ||
4015d11e | 2856 | devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); |
efc2214b MF |
2857 | vsi->xdp_rings = NULL; |
2858 | ||
22bf877e MF |
2859 | if (static_key_enabled(&ice_xdp_locking_key)) |
2860 | static_branch_dec(&ice_xdp_locking_key); | |
2861 | ||
efc2214b MF |
2862 | if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) |
2863 | return 0; | |
2864 | ||
2865 | ice_vsi_assign_bpf_prog(vsi, NULL); | |
2866 | ||
2867 | /* notify Tx scheduler that we destroyed XDP queues and bring | |
2868 | * back the old number of child nodes | |
2869 | */ | |
2870 | for (i = 0; i < vsi->tc_cfg.numtc; i++) | |
2871 | max_txqs[i] = vsi->num_txq; | |
2872 | ||
c8f135c6 MP |
2873 | /* change number of XDP Tx queues to 0 */ |
2874 | vsi->num_xdp_txq = 0; | |
2875 | ||
efc2214b MF |
2876 | return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
2877 | max_txqs); | |
2878 | } | |
2879 | ||
c7a21904 MS |
2880 | /** |
2881 | * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI | |
2882 | * @vsi: VSI to schedule napi on | |
2883 | */ | |
2884 | static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) | |
2885 | { | |
2886 | int i; | |
2887 | ||
2888 | ice_for_each_rxq(vsi, i) { | |
e72bba21 | 2889 | struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; |
c7a21904 MS |
2890 | |
2891 | if (rx_ring->xsk_pool) | |
2892 | napi_schedule(&rx_ring->q_vector->napi); | |
2893 | } | |
2894 | } | |
2895 | ||
22bf877e MF |
2896 | /** |
2897 | * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have |
2898 | * @vsi: VSI to determine the count of XDP Tx queues |
2899 | * |
2900 | * Returns 0 if the count of available Tx queues is at least half the CPU |
2901 | * count, -ENOMEM otherwise |
2902 | */ | |
2903 | int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) | |
2904 | { | |
2905 | u16 avail = ice_get_avail_txq_count(vsi->back); | |
2906 | u16 cpus = num_possible_cpus(); | |
2907 | ||
2908 | if (avail < cpus / 2) | |
2909 | return -ENOMEM; | |
2910 | ||
2911 | vsi->num_xdp_txq = min_t(u16, avail, cpus); | |
2912 | ||
2913 | if (vsi->num_xdp_txq < cpus) | |
2914 | static_branch_inc(&ice_xdp_locking_key); | |
2915 | ||
2916 | return 0; | |
2917 | } | |
2918 | ||
60bc72b3 MF |
2919 | /** |
2920 | * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP | |
2921 | * @vsi: Pointer to VSI structure | |
2922 | */ | |
2923 | static int ice_max_xdp_frame_size(struct ice_vsi *vsi) | |
2924 | { | |
2925 | if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) | |
2926 | return ICE_RXBUF_1664; | |
2927 | else | |
2928 | return ICE_RXBUF_3072; | |
2929 | } | |
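/* Tiny sketch of the MTU check in ice_xdp_setup_prog() below: the wire
 * frame is the MTU plus L2 header/padding overhead and must fit a single
 * Rx buffer unless the XDP prog supports frags. The constants here are
 * stand-ins for ICE_ETH_PKT_HDR_PAD and ICE_RXBUF_3072, not the real values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 3050;	/* hypothetical netdev MTU */
	unsigned int hdr_pad = 26;	/* stand-in for ICE_ETH_PKT_HDR_PAD */
	unsigned int max_frame = 3072;	/* stand-in for ICE_RXBUF_3072 */

	if (mtu + hdr_pad > max_frame)
		puts("MTU too large for linear frames -> -EOPNOTSUPP");
	else
		puts("frame fits a single Rx buffer");
	return 0;
}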
2930 | ||
efc2214b MF |
2931 | /** |
2932 | * ice_xdp_setup_prog - Add or remove XDP eBPF program | |
2933 | * @vsi: VSI to setup XDP for | |
2934 | * @prog: XDP program | |
2935 | * @extack: netlink extended ack | |
2936 | */ | |
2937 | static int | |
2938 | ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, | |
2939 | struct netlink_ext_ack *extack) | |
2940 | { | |
60bc72b3 | 2941 | unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; |
efc2214b MF |
2942 | bool if_running = netif_running(vsi->netdev); |
2943 | int ret = 0, xdp_ring_err = 0; | |
2944 | ||
2fba7dc5 MF |
2945 | if (prog && !prog->aux->xdp_has_frags) { |
2946 | if (frame_size > ice_max_xdp_frame_size(vsi)) { | |
2947 | NL_SET_ERR_MSG_MOD(extack, | |
2948 | "MTU is too large for linear frames and XDP prog does not support frags"); | |
2949 | return -EOPNOTSUPP; | |
2950 | } | |
efc2214b MF |
2951 | } |
2952 | ||
46974842 MF |
2953 | /* hot swap progs and avoid toggling link */ |
2954 | if (ice_is_xdp_ena_vsi(vsi) == !!prog) { | |
2955 | ice_vsi_assign_bpf_prog(vsi, prog); | |
2956 | return 0; | |
2957 | } | |
2958 | ||
efc2214b | 2959 | /* need to stop netdev while setting up the program for Rx rings */ |
e97fb1ae | 2960 | if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { |
efc2214b MF |
2961 | ret = ice_down(vsi); |
2962 | if (ret) { | |
af23635a | 2963 | NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); |
efc2214b MF |
2964 | return ret; |
2965 | } | |
2966 | } | |
2967 | ||
2968 | if (!ice_is_xdp_ena_vsi(vsi) && prog) { | |
22bf877e MF |
2969 | xdp_ring_err = ice_vsi_determine_xdp_res(vsi); |
2970 | if (xdp_ring_err) { | |
2971 | NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); | |
2972 | } else { | |
2973 | xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); | |
2974 | if (xdp_ring_err) | |
2975 | NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); | |
2976 | } | |
b6a4103c | 2977 | xdp_features_set_redirect_target(vsi->netdev, true); |
7e753eb6 PP |
2978 | /* reallocate Rx queues that are used for zero-copy */ |
2979 | xdp_ring_err = ice_realloc_zc_buf(vsi, true); | |
2980 | if (xdp_ring_err) | |
2981 | NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); | |
efc2214b | 2982 | } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { |
66c0e13a | 2983 | xdp_features_clear_redirect_target(vsi->netdev); |
efc2214b MF |
2984 | xdp_ring_err = ice_destroy_xdp_rings(vsi); |
2985 | if (xdp_ring_err) | |
af23635a | 2986 | NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); |
7e753eb6 PP |
2987 | /* reallocate Rx queues that were used for zero-copy */ |
2988 | xdp_ring_err = ice_realloc_zc_buf(vsi, false); | |
2989 | if (xdp_ring_err) | |
2990 | NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); | |
efc2214b MF |
2991 | } |
2992 | ||
2993 | if (if_running) | |
2994 | ret = ice_up(vsi); | |
2995 | ||
c7a21904 MS |
2996 | if (!ret && prog) |
2997 | ice_vsi_rx_napi_schedule(vsi); | |
2d4238f5 | 2998 | |
efc2214b MF |
2999 | return (ret || xdp_ring_err) ? -ENOMEM : 0; |
3000 | } | |
3001 | ||
ebc5399e MF |
3002 | /** |
3003 | * ice_xdp_safe_mode - XDP handler for safe mode | |
3004 | * @dev: netdevice | |
3005 | * @xdp: XDP command | |
3006 | */ | |
3007 | static int ice_xdp_safe_mode(struct net_device __always_unused *dev, | |
3008 | struct netdev_bpf *xdp) | |
3009 | { | |
3010 | NL_SET_ERR_MSG_MOD(xdp->extack, | |
3011 | "Please provide working DDP firmware package in order to use XDP\n" | |
3012 | "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); | |
3013 | return -EOPNOTSUPP; | |
3014 | } | |
3015 | ||
efc2214b MF |
3016 | /** |
3017 | * ice_xdp - implements XDP handler | |
3018 | * @dev: netdevice | |
3019 | * @xdp: XDP command | |
3020 | */ | |
3021 | static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |
3022 | { | |
3023 | struct ice_netdev_priv *np = netdev_priv(dev); | |
3024 | struct ice_vsi *vsi = np->vsi; | |
3025 | ||
3026 | if (vsi->type != ICE_VSI_PF) { | |
af23635a | 3027 | NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); |
efc2214b MF |
3028 | return -EINVAL; |
3029 | } | |
3030 | ||
3031 | switch (xdp->command) { | |
3032 | case XDP_SETUP_PROG: | |
3033 | return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); | |
1742b3d5 MK |
3034 | case XDP_SETUP_XSK_POOL: |
3035 | return ice_xsk_pool_setup(vsi, xdp->xsk.pool, | |
2d4238f5 | 3036 | xdp->xsk.queue_id); |
efc2214b MF |
3037 | default: |
3038 | return -EINVAL; | |
3039 | } | |
3040 | } | |
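/* Hedged userspace example of what lands in ice_xdp() above: attaching a
 * program via libbpf goes through dev_xdp_install() -> ndo_bpf with
 * XDP_SETUP_PROG. Object/program/interface names are hypothetical, and
 * bpf_xdp_attach() needs a reasonably recent libbpf (>= 0.8).
 */
#include <bpf/libbpf.h>
#include <net/if.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_prog.bpf.o", NULL);
	unsigned int ifindex = if_nametoindex("eth0");	/* hypothetical netdev */
	struct bpf_program *prog;

	if (!obj || !ifindex || bpf_object__load(obj))
		return 1;
	prog = bpf_object__find_program_by_name(obj, "xdp_pass");	/* hypothetical */
	if (!prog)
		return 1;
	/* XDP_SETUP_PROG ends up in the driver's ndo_bpf handler */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL) ? 1 : 0;
}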
3041 | ||
940b61af AV |
3042 | /** |
3043 | * ice_ena_misc_vector - enable the non-queue interrupts | |
3044 | * @pf: board private structure | |
3045 | */ | |
3046 | static void ice_ena_misc_vector(struct ice_pf *pf) | |
3047 | { | |
3048 | struct ice_hw *hw = &pf->hw; | |
82e71b22 | 3049 | u32 pf_intr_start_offset; |
940b61af AV |
3050 | u32 val; |
3051 | ||
9d5c5a52 PG |
3052 | /* Disable anti-spoof detection interrupt to prevent spurious event |
3053 | * interrupts during a function reset. Anti-spoof functionality is |
3054 | * still supported. | |
3055 | */ | |
3056 | val = rd32(hw, GL_MDCK_TX_TDPU); | |
3057 | val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; | |
3058 | wr32(hw, GL_MDCK_TX_TDPU, val); | |
3059 | ||
940b61af AV |
3060 | /* clear things first */ |
3061 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ | |
3062 | rd32(hw, PFINT_OICR); /* read to clear */ | |
3063 | ||
3bcd7fa3 | 3064 | val = (PFINT_OICR_ECC_ERR_M | |
940b61af AV |
3065 | PFINT_OICR_MAL_DETECT_M | |
3066 | PFINT_OICR_GRST_M | | |
3067 | PFINT_OICR_PCI_EXCEPTION_M | | |
007676b4 | 3068 | PFINT_OICR_VFLR_M | |
3bcd7fa3 | 3069 | PFINT_OICR_HMC_ERR_M | |
348048e7 | 3070 | PFINT_OICR_PE_PUSH_M | |
3bcd7fa3 | 3071 | PFINT_OICR_PE_CRITERR_M); |
940b61af AV |
3072 | |
3073 | wr32(hw, PFINT_OICR_ENA, val); | |
3074 | ||
3075 | /* SW_ITR_IDX = 0, but don't change INTENA */ | |
4aad5335 | 3076 | wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), |
940b61af | 3077 | GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); |
82e71b22 KK |
3078 | |
3079 | if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) | |
3080 | return; | |
3081 | pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; | |
3082 | wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), | |
3083 | GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); | |
3084 | } | |
3085 | ||
3086 | /** | |
3087 | * ice_ll_ts_intr - ll_ts interrupt handler | |
3088 | * @irq: interrupt number | |
3089 | * @data: pointer to the PF structure |
3090 | */ | |
3091 | static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data) | |
3092 | { | |
3093 | struct ice_pf *pf = data; | |
3094 | u32 pf_intr_start_offset; | |
3095 | struct ice_ptp_tx *tx; | |
3096 | unsigned long flags; | |
3097 | struct ice_hw *hw; | |
3098 | u32 val; | |
3099 | u8 idx; | |
3100 | ||
3101 | hw = &pf->hw; | |
3102 | tx = &pf->ptp.port.tx; | |
3103 | spin_lock_irqsave(&tx->lock, flags); | |
3104 | ice_ptp_complete_tx_single_tstamp(tx); | |
3105 | ||
3106 | idx = find_next_bit_wrap(tx->in_use, tx->len, | |
3107 | tx->last_ll_ts_idx_read + 1); | |
3108 | if (idx != tx->len) | |
3109 | ice_ptp_req_tx_single_tstamp(tx, idx); | |
3110 | spin_unlock_irqrestore(&tx->lock, flags); | |
3111 | ||
3112 | val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | | |
3113 | (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); | |
3114 | pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; | |
3115 | wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset), | |
3116 | val); | |
3117 | ||
3118 | return IRQ_HANDLED; | |
940b61af AV |
3119 | } |
3120 | ||
3121 | /** | |
3122 | * ice_misc_intr - misc interrupt handler | |
3123 | * @irq: interrupt number | |
3124 | * @data: pointer to the PF structure |
3125 | */ | |
3126 | static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) | |
3127 | { | |
3128 | struct ice_pf *pf = (struct ice_pf *)data; | |
00d50001 | 3129 | irqreturn_t ret = IRQ_HANDLED; |
940b61af | 3130 | struct ice_hw *hw = &pf->hw; |
4015d11e | 3131 | struct device *dev; |
940b61af AV |
3132 | u32 oicr, ena_mask; |
3133 | ||
4015d11e | 3134 | dev = ice_pf_to_dev(pf); |
7e408e07 AV |
3135 | set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); |
3136 | set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); | |
8f5ee3c4 | 3137 | set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); |
940b61af AV |
3138 | |
3139 | oicr = rd32(hw, PFINT_OICR); | |
3140 | ena_mask = rd32(hw, PFINT_OICR_ENA); | |
3141 | ||
0e674aeb AV |
3142 | if (oicr & PFINT_OICR_SWINT_M) { |
3143 | ena_mask &= ~PFINT_OICR_SWINT_M; | |
3144 | pf->sw_int_count++; | |
3145 | } | |
3146 | ||
b3969fd7 SM |
3147 | if (oicr & PFINT_OICR_MAL_DETECT_M) { |
3148 | ena_mask &= ~PFINT_OICR_MAL_DETECT_M; | |
7e408e07 | 3149 | set_bit(ICE_MDD_EVENT_PENDING, pf->state); |
b3969fd7 | 3150 | } |
007676b4 | 3151 | if (oicr & PFINT_OICR_VFLR_M) { |
f844d521 | 3152 | /* disable any further VFLR event notifications */ |
7e408e07 | 3153 | if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { |
f844d521 BC |
3154 | u32 reg = rd32(hw, PFINT_OICR_ENA); |
3155 | ||
3156 | reg &= ~PFINT_OICR_VFLR_M; | |
3157 | wr32(hw, PFINT_OICR_ENA, reg); | |
3158 | } else { | |
3159 | ena_mask &= ~PFINT_OICR_VFLR_M; | |
7e408e07 | 3160 | set_bit(ICE_VFLR_EVENT_PENDING, pf->state); |
f844d521 | 3161 | } |
007676b4 | 3162 | } |
b3969fd7 | 3163 | |
0b28b702 AV |
3164 | if (oicr & PFINT_OICR_GRST_M) { |
3165 | u32 reset; | |
b3969fd7 | 3166 | |
0b28b702 AV |
3167 | /* we have a reset warning */ |
3168 | ena_mask &= ~PFINT_OICR_GRST_M; | |
5a259f8e JB |
3169 | reset = FIELD_GET(GLGEN_RSTAT_RESET_TYPE_M, |
3170 | rd32(hw, GLGEN_RSTAT)); | |
0b28b702 AV |
3171 | |
3172 | if (reset == ICE_RESET_CORER) | |
3173 | pf->corer_count++; | |
3174 | else if (reset == ICE_RESET_GLOBR) | |
3175 | pf->globr_count++; | |
ca4929b6 | 3176 | else if (reset == ICE_RESET_EMPR) |
0b28b702 | 3177 | pf->empr_count++; |
ca4929b6 | 3178 | else |
4015d11e | 3179 | dev_dbg(dev, "Invalid reset type %d\n", reset); |
0b28b702 AV |
3180 | |
3181 | /* If a reset cycle isn't already in progress, we set a bit in | |
3182 | * pf->state so that the service task can start a reset/rebuild. | |
0b28b702 | 3183 | */ |
7e408e07 | 3184 | if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { |
0b28b702 | 3185 | if (reset == ICE_RESET_CORER) |
7e408e07 | 3186 | set_bit(ICE_CORER_RECV, pf->state); |
0b28b702 | 3187 | else if (reset == ICE_RESET_GLOBR) |
7e408e07 | 3188 | set_bit(ICE_GLOBR_RECV, pf->state); |
0b28b702 | 3189 | else |
7e408e07 | 3190 | set_bit(ICE_EMPR_RECV, pf->state); |
0b28b702 | 3191 | |
fd2a9817 AV |
3192 | /* There are a couple of different bits at play here. |
3193 | * hw->reset_ongoing indicates whether the hardware is | |
3194 | * in reset. This is set to true when a reset interrupt | |
3195 | * is received and set back to false after the driver | |
3196 | * has determined that the hardware is out of reset. | |
3197 | * | |
7e408e07 | 3198 | * ICE_RESET_OICR_RECV in pf->state indicates |
fd2a9817 AV |
3199 | * that a post reset rebuild is required before the |
3200 | * driver is operational again. This is set above. | |
3201 | * | |
3202 | * As this is the start of the reset/rebuild cycle, set | |
3203 | * both to indicate that. | |
3204 | */ | |
3205 | hw->reset_ongoing = true; | |
0b28b702 AV |
3206 | } |
3207 | } | |
3208 | ||
ea9b847c JK |
3209 | if (oicr & PFINT_OICR_TSYN_TX_M) { |
3210 | ena_mask &= ~PFINT_OICR_TSYN_TX_M; | |
82e71b22 KK |
3211 | if (ice_pf_state_is_nominal(pf) && |
3212 | pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { | |
3213 | struct ice_ptp_tx *tx = &pf->ptp.port.tx; | |
3214 | unsigned long flags; | |
3215 | u8 idx; | |
3216 | ||
3217 | spin_lock_irqsave(&tx->lock, flags); | |
3218 | idx = find_next_bit_wrap(tx->in_use, tx->len, | |
3219 | tx->last_ll_ts_idx_read + 1); | |
3220 | if (idx != tx->len) | |
3221 | ice_ptp_req_tx_single_tstamp(tx, idx); | |
3222 | spin_unlock_irqrestore(&tx->lock, flags); | |
3223 | } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { | |
6e8b2c88 | 3224 | set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); |
00d50001 KK |
3225 | ret = IRQ_WAKE_THREAD; |
3226 | } | |
ea9b847c JK |
3227 | } |
3228 | ||
172db5f9 MM |
3229 | if (oicr & PFINT_OICR_TSYN_EVNT_M) { |
3230 | u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; | |
3231 | u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); | |
3232 | ||
172db5f9 | 3233 | ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; |
6e8b2c88 | 3234 | |
42d40bb2 | 3235 | if (ice_pf_src_tmr_owned(pf)) { |
6e8b2c88 KK |
3236 | /* Save EVENTs from GLTSYN register */ |
3237 | pf->ptp.ext_ts_irq |= gltsyn_stat & | |
3238 | (GLTSYN_STAT_EVENT0_M | | |
3239 | GLTSYN_STAT_EVENT1_M | | |
3240 | GLTSYN_STAT_EVENT2_M); | |
3241 | ||
00d50001 | 3242 | ice_ptp_extts_event(pf); |
6e8b2c88 | 3243 | } |
172db5f9 MM |
3244 | } |
3245 | ||
348048e7 DE |
3246 | #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) |
3247 | if (oicr & ICE_AUX_CRIT_ERR) { | |
32d53c0a AL |
3248 | pf->oicr_err_reg |= oicr; |
3249 | set_bit(ICE_AUX_ERR_PENDING, pf->state); | |
348048e7 | 3250 | ena_mask &= ~ICE_AUX_CRIT_ERR; |
940b61af AV |
3251 | } |
3252 | ||
8d7189d2 | 3253 | /* Report any remaining unexpected interrupts */ |
940b61af AV |
3254 | oicr &= ena_mask; |
3255 | if (oicr) { | |
4015d11e | 3256 | dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); |
940b61af AV |
3257 | /* If a critical error is pending there is no choice but to |
3258 | * reset the device. | |
3259 | */ | |
348048e7 | 3260 | if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | |
0b28b702 | 3261 | PFINT_OICR_ECC_ERR_M)) { |
7e408e07 | 3262 | set_bit(ICE_PFR_REQ, pf->state); |
0b28b702 | 3263 | } |
940b61af | 3264 | } |
00d50001 KK |
3265 | ice_service_task_schedule(pf); |
3266 | if (ret == IRQ_HANDLED) | |
3267 | ice_irq_dynamic_ena(hw, NULL, NULL); | |
940b61af | 3268 | |
00d50001 | 3269 | return ret; |
940b61af AV |
3270 | } |
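/* Minimal sketch of the OICR pattern in ice_misc_intr() above: read the
 * cause register once, strip each handled cause out of the enabled mask,
 * and flag whatever is left as unexpected. The cause bits here are
 * hypothetical, not the real PFINT_OICR_* layout.
 */
#include <stdio.h>

#define CAUSE_MAL_DETECT	(1u << 0)	/* stand-in cause bit */
#define CAUSE_GRST		(1u << 1)	/* stand-in cause bit */

int main(void)
{
	unsigned int oicr = CAUSE_MAL_DETECT | (1u << 7); /* fake register read */
	unsigned int ena_mask = 0xffu;			  /* fake enabled mask */

	if (oicr & CAUSE_MAL_DETECT)
		ena_mask &= ~CAUSE_MAL_DETECT;	/* handled: set state bit */
	if (oicr & CAUSE_GRST)
		ena_mask &= ~CAUSE_GRST;	/* handled: record reset type */

	oicr &= ena_mask;
	if (oicr)
		printf("unhandled interrupt oicr=0x%08x\n", oicr);
	return 0;
}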
3271 | ||
1229b339 KK |
3272 | /** |
3273 | * ice_misc_intr_thread_fn - misc interrupt thread function | |
3274 | * @irq: interrupt number | |
3275 | * @data: pointer to the PF structure |
3276 | */ | |
3277 | static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) | |
3278 | { | |
1229b339 | 3279 | struct ice_pf *pf = data; |
0ec38df3 JK |
3280 | struct ice_hw *hw; |
3281 | ||
3282 | hw = &pf->hw; | |
1229b339 | 3283 | |
30f15874 | 3284 | if (ice_is_reset_in_progress(pf->state)) |
00d50001 | 3285 | goto skip_irq; |
6e8b2c88 KK |
3286 | |
3287 | if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { | |
9a8648cc JK |
3288 | /* Process outstanding Tx timestamps. If there is more work, |
3289 | * re-arm the interrupt to trigger again. | |
3290 | */ | |
3291 | if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { | |
3292 | wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); | |
3293 | ice_flush(hw); | |
3294 | } | |
6e8b2c88 | 3295 | } |
30f15874 | 3296 | |
00d50001 | 3297 | skip_irq: |
0ec38df3 JK |
3298 | ice_irq_dynamic_ena(hw, NULL, NULL); |
3299 | ||
30f15874 | 3300 | return IRQ_HANDLED; |
1229b339 KK |
3301 | } |
3302 | ||
0e04e8e1 BC |
3303 | /** |
3304 | * ice_dis_ctrlq_interrupts - disable control queue interrupts | |
3305 | * @hw: pointer to HW structure | |
3306 | */ | |
3307 | static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) | |
3308 | { | |
3309 | /* disable Admin queue Interrupt causes */ | |
3310 | wr32(hw, PFINT_FW_CTL, | |
3311 | rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); | |
3312 | ||
3313 | /* disable Mailbox queue Interrupt causes */ | |
3314 | wr32(hw, PFINT_MBX_CTL, | |
3315 | rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); | |
3316 | ||
8f5ee3c4 JK |
3317 | wr32(hw, PFINT_SB_CTL, |
3318 | rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); | |
3319 | ||
0e04e8e1 BC |
3320 | /* disable Control queue Interrupt causes */ |
3321 | wr32(hw, PFINT_OICR_CTL, | |
3322 | rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); | |
3323 | ||
3324 | ice_flush(hw); | |
3325 | } | |
3326 | ||
82e71b22 KK |
3327 | /** |
3328 | * ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup |
3329 | * @pf: board private structure | |
3330 | */ | |
3331 | static void ice_free_irq_msix_ll_ts(struct ice_pf *pf) | |
3332 | { | |
3333 | int irq_num = pf->ll_ts_irq.virq; | |
3334 | ||
3335 | synchronize_irq(irq_num); | |
3336 | devm_free_irq(ice_pf_to_dev(pf), irq_num, pf); | |
3337 | ||
3338 | ice_free_irq(pf, pf->ll_ts_irq); | |
3339 | } | |
3340 | ||
940b61af AV |
3341 | /** |
3342 | * ice_free_irq_msix_misc - Unroll misc vector setup | |
3343 | * @pf: board private structure | |
3344 | */ | |
3345 | static void ice_free_irq_msix_misc(struct ice_pf *pf) | |
3346 | { | |
4aad5335 | 3347 | int misc_irq_num = pf->oicr_irq.virq; |
0e04e8e1 BC |
3348 | struct ice_hw *hw = &pf->hw; |
3349 | ||
3350 | ice_dis_ctrlq_interrupts(hw); | |
3351 | ||
940b61af | 3352 | /* disable OICR interrupt */ |
0e04e8e1 BC |
3353 | wr32(hw, PFINT_OICR_ENA, 0); |
3354 | ice_flush(hw); | |
940b61af | 3355 | |
05018936 PR |
3356 | synchronize_irq(misc_irq_num); |
3357 | devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); | |
940b61af | 3358 | |
4aad5335 | 3359 | ice_free_irq(pf, pf->oicr_irq); |
82e71b22 KK |
3360 | if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) |
3361 | ice_free_irq_msix_ll_ts(pf); | |
940b61af AV |
3362 | } |
3363 | ||
0e04e8e1 BC |
3364 | /** |
3365 | * ice_ena_ctrlq_interrupts - enable control queue interrupts | |
3366 | * @hw: pointer to HW structure | |
b07833a0 | 3367 | * @reg_idx: HW vector index to associate the control queue interrupts with |
0e04e8e1 | 3368 | */ |
b07833a0 | 3369 | static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) |
0e04e8e1 BC |
3370 | { |
3371 | u32 val; | |
3372 | ||
b07833a0 | 3373 | val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
0e04e8e1 BC |
3374 | PFINT_OICR_CTL_CAUSE_ENA_M); |
3375 | wr32(hw, PFINT_OICR_CTL, val); | |
3376 | ||
3377 | /* enable Admin queue Interrupt causes */ | |
b07833a0 | 3378 | val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
0e04e8e1 BC |
3379 | PFINT_FW_CTL_CAUSE_ENA_M); |
3380 | wr32(hw, PFINT_FW_CTL, val); | |
3381 | ||
3382 | /* enable Mailbox queue Interrupt causes */ | |
b07833a0 | 3383 | val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | |
0e04e8e1 BC |
3384 | PFINT_MBX_CTL_CAUSE_ENA_M); |
3385 | wr32(hw, PFINT_MBX_CTL, val); | |
3386 | ||
82e71b22 KK |
3387 | if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) { |
3388 | /* enable Sideband queue Interrupt causes */ | |
3389 | val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | | |
3390 | PFINT_SB_CTL_CAUSE_ENA_M); | |
3391 | wr32(hw, PFINT_SB_CTL, val); | |
3392 | } | |
8f5ee3c4 | 3393 | |
0e04e8e1 BC |
3394 | ice_flush(hw); |
3395 | } | |
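/* Sketch of the compose pattern in ice_ena_ctrlq_interrupts() above: mask
 * the vector index into the MSIX index field and OR in the cause-enable
 * bit before writing the CTL register. Field mask and bit position are
 * stand-ins, not the real PFINT_* register layout.
 */
#include <stdio.h>

#define MSIX_INDX_M	0x7ffu		/* stand-in index field mask */
#define CAUSE_ENA_M	(1u << 30)	/* stand-in cause-enable bit */

int main(void)
{
	unsigned short reg_idx = 5;	/* hypothetical misc vector index */
	unsigned int val = (reg_idx & MSIX_INDX_M) | CAUSE_ENA_M;

	printf("ctl register value = 0x%08x\n", val);
	return 0;
}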
3396 | ||
940b61af AV |
3397 | /** |
3398 | * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events | |
3399 | * @pf: board private structure | |
3400 | * | |
3401 | * This sets up the handler for MSIX 0, which is used to manage the | |
df17b7e0 | 3402 | * non-queue interrupts, e.g. AdminQ and errors. This is not used |
940b61af AV |
3403 | * when in MSI or Legacy interrupt mode. |
3404 | */ | |
3405 | static int ice_req_irq_msix_misc(struct ice_pf *pf) | |
3406 | { | |
4015d11e | 3407 | struct device *dev = ice_pf_to_dev(pf); |
940b61af | 3408 | struct ice_hw *hw = &pf->hw; |
82e71b22 KK |
3409 | u32 pf_intr_start_offset; |
3410 | struct msi_map irq; | |
4aad5335 | 3411 | int err = 0; |
940b61af AV |
3412 | |
3413 | if (!pf->int_name[0]) | |
3414 | snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", | |
4015d11e | 3415 | dev_driver_string(dev), dev_name(dev)); |
940b61af | 3416 | |
82e71b22 KK |
3417 | if (!pf->int_name_ll_ts[0]) |
3418 | snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1, | |
3419 | "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev)); | |
0b28b702 AV |
3420 | /* Do not request IRQ but do enable OICR interrupt since settings are |
3421 | * lost during reset. Note that this function is called only during | |
3422 | * rebuild path and not while reset is in progress. | |
3423 | */ | |
5df7e45d | 3424 | if (ice_is_reset_in_progress(pf->state)) |
0b28b702 AV |
3425 | goto skip_req_irq; |
3426 | ||
cbe66bfe | 3427 | /* reserve one vector in irq_tracker for misc interrupts */ |
82e71b22 KK |
3428 | irq = ice_alloc_irq(pf, false); |
3429 | if (irq.index < 0) | |
3430 | return irq.index; | |
4aad5335 | 3431 | |
82e71b22 | 3432 | pf->oicr_irq = irq; |
4aad5335 PR |
3433 | err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, |
3434 | ice_misc_intr_thread_fn, 0, | |
3435 | pf->int_name, pf); | |
940b61af | 3436 | if (err) { |
1229b339 | 3437 | dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", |
940b61af | 3438 | pf->int_name, err); |
4aad5335 | 3439 | ice_free_irq(pf, pf->oicr_irq); |
940b61af AV |
3440 | return err; |
3441 | } | |
3442 | ||
82e71b22 KK |
3443 | /* reserve one vector in irq_tracker for ll_ts interrupt */ |
3444 | if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) | |
3445 | goto skip_req_irq; | |
3446 | ||
3447 | irq = ice_alloc_irq(pf, false); | |
3448 | if (irq.index < 0) | |
3449 | return irq.index; | |
3450 | ||
3451 | pf->ll_ts_irq = irq; | |
3452 | err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0, | |
3453 | pf->int_name_ll_ts, pf); | |
3454 | if (err) { | |
3455 | dev_err(dev, "devm_request_irq for %s failed: %d\n", | |
3456 | pf->int_name_ll_ts, err); | |
3457 | ice_free_irq(pf, pf->ll_ts_irq); | |
3458 | return err; | |
3459 | } | |
3460 | ||
0b28b702 | 3461 | skip_req_irq: |
940b61af AV |
3462 | ice_ena_misc_vector(pf); |
3463 | ||
4aad5335 | 3464 | ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); |
82e71b22 KK |
3465 | /* This enables LL TS interrupt */ |
3466 | pf_intr_start_offset = rd32(hw, PFINT_ALLOC) & PFINT_ALLOC_FIRST; | |
3467 | if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) | |
3468 | wr32(hw, PFINT_SB_CTL, | |
3469 | ((pf->ll_ts_irq.index + pf_intr_start_offset) & | |
3470 | PFINT_SB_CTL_MSIX_INDX_M) | PFINT_SB_CTL_CAUSE_ENA_M); | |
4aad5335 | 3471 | wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), |
63f545ed | 3472 | ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); |
940b61af AV |
3473 | |
3474 | ice_flush(hw); | |
cdedef59 | 3475 | ice_irq_dynamic_ena(hw, NULL, NULL); |
940b61af AV |
3476 | |
3477 | return 0; | |
3478 | } | |
3479 | ||
3a858ba3 | 3480 | /** |
df0f8479 AV |
3481 | * ice_napi_add - register NAPI handler for the VSI |
3482 | * @vsi: VSI for which NAPI handler is to be registered | |
3a858ba3 | 3483 | * |
df0f8479 AV |
3484 | * This function is only called in the driver's load path. Registering the NAPI |
3485 | * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, | |
3486 | * reset/rebuild, etc.) | |
3a858ba3 | 3487 | */ |
df0f8479 | 3488 | static void ice_napi_add(struct ice_vsi *vsi) |
3a858ba3 | 3489 | { |
df0f8479 | 3490 | int v_idx; |
3a858ba3 | 3491 | |
df0f8479 | 3492 | if (!vsi->netdev) |
3a858ba3 | 3493 | return; |
3a858ba3 | 3494 | |
91fdbce7 | 3495 | ice_for_each_q_vector(vsi, v_idx) { |
df0f8479 | 3496 | netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, |
b48b89f9 | 3497 | ice_napi_poll); |
080b0c8d | 3498 | __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); |
91fdbce7 | 3499 | } |
3a858ba3 AV |
3500 | } |
3501 | ||
3502 | /** | |
462acf6a | 3503 | * ice_set_ops - set netdev and ethtools ops for the given netdev |
b6a4103c | 3504 | * @vsi: the VSI associated with the new netdev |
3a858ba3 | 3505 | */ |
b6a4103c | 3506 | static void ice_set_ops(struct ice_vsi *vsi) |
3a858ba3 | 3507 | { |
b6a4103c | 3508 | struct net_device *netdev = vsi->netdev; |
462acf6a TN |
3509 | struct ice_pf *pf = ice_netdev_to_pf(netdev); |
3510 | ||
3511 | if (ice_is_safe_mode(pf)) { | |
3512 | netdev->netdev_ops = &ice_netdev_safe_mode_ops; | |
3513 | ice_set_ethtool_safe_mode_ops(netdev); | |
3514 | return; | |
3515 | } | |
3516 | ||
3517 | netdev->netdev_ops = &ice_netdev_ops; | |
b20e6c17 | 3518 | netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; |
9031d5f4 | 3519 | netdev->xdp_metadata_ops = &ice_xdp_md_ops; |
462acf6a | 3520 | ice_set_ethtool_ops(netdev); |
b6a4103c LB |
3521 | |
3522 | if (vsi->type != ICE_VSI_PF) | |
3523 | return; | |
3524 | ||
3525 | netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | | |
3526 | NETDEV_XDP_ACT_XSK_ZEROCOPY | | |
3527 | NETDEV_XDP_ACT_RX_SG; | |
eeb2b538 | 3528 | netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; |
462acf6a TN |
3529 | } |
3530 | ||
3531 | /** | |
3532 | * ice_set_netdev_features - set features for the given netdev | |
3533 | * @netdev: netdev instance | |
3534 | */ | |
3535 | static void ice_set_netdev_features(struct net_device *netdev) | |
3536 | { | |
3537 | struct ice_pf *pf = ice_netdev_to_pf(netdev); | |
1babaf77 | 3538 | bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); |
d76a60ba AV |
3539 | netdev_features_t csumo_features; |
3540 | netdev_features_t vlano_features; | |
3541 | netdev_features_t dflt_features; | |
3542 | netdev_features_t tso_features; | |
3a858ba3 | 3543 | |
462acf6a TN |
3544 | if (ice_is_safe_mode(pf)) { |
3545 | /* safe mode */ | |
3546 | netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; | |
3547 | netdev->hw_features = netdev->features; | |
3548 | return; | |
3549 | } | |
3a858ba3 | 3550 | |
d76a60ba AV |
3551 | dflt_features = NETIF_F_SG | |
3552 | NETIF_F_HIGHDMA | | |
148beb61 | 3553 | NETIF_F_NTUPLE | |
d76a60ba AV |
3554 | NETIF_F_RXHASH; |
3555 | ||
3556 | csumo_features = NETIF_F_RXCSUM | | |
3557 | NETIF_F_IP_CSUM | | |
cf909e19 | 3558 | NETIF_F_SCTP_CRC | |
d76a60ba AV |
3559 | NETIF_F_IPV6_CSUM; |
3560 | ||
3561 | vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | | |
3562 | NETIF_F_HW_VLAN_CTAG_TX | | |
3563 | NETIF_F_HW_VLAN_CTAG_RX; | |
3564 | ||
1babaf77 BC |
3565 | /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ |
3566 | if (is_dvm_ena) | |
3567 | vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER; | |
3568 | ||
a4e82a81 TN |
3569 | tso_features = NETIF_F_TSO | |
3570 | NETIF_F_TSO_ECN | | |
3571 | NETIF_F_TSO6 | | |
3572 | NETIF_F_GSO_GRE | | |
3573 | NETIF_F_GSO_UDP_TUNNEL | | |
3574 | NETIF_F_GSO_GRE_CSUM | | |
3575 | NETIF_F_GSO_UDP_TUNNEL_CSUM | | |
3576 | NETIF_F_GSO_PARTIAL | | |
3577 | NETIF_F_GSO_IPXIP4 | | |
3578 | NETIF_F_GSO_IPXIP6 | | |
a54e3b8c | 3579 | NETIF_F_GSO_UDP_L4; |
d76a60ba | 3580 | |
a4e82a81 TN |
3581 | netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | |
3582 | NETIF_F_GSO_GRE_CSUM; | |
3a858ba3 | 3583 | /* set features that user can change */ |
d76a60ba AV |
3584 | netdev->hw_features = dflt_features | csumo_features | |
3585 | vlano_features | tso_features; | |
3a858ba3 | 3586 | |
a4e82a81 | 3587 | /* add support for HW_CSUM on packets with MPLS header */ |
69e66c04 JD |
3588 | netdev->mpls_features = NETIF_F_HW_CSUM | |
3589 | NETIF_F_TSO | | |
3590 | NETIF_F_TSO6; | |
a4e82a81 | 3591 | |
3a858ba3 AV |
3592 | /* enable features */ |
3593 | netdev->features |= netdev->hw_features; | |
0d08a441 KP |
3594 | |
3595 | netdev->hw_features |= NETIF_F_HW_TC; | |
44ece4e1 | 3596 | netdev->hw_features |= NETIF_F_LOOPBACK; |
0d08a441 | 3597 | |
d76a60ba AV |
3598 | /* encap and VLAN devices inherit default, csumo and tso features */ |
3599 | netdev->hw_enc_features |= dflt_features | csumo_features | | |
3600 | tso_features; | |
3601 | netdev->vlan_features |= dflt_features | csumo_features | | |
3602 | tso_features; | |
1babaf77 BC |
3603 | |
3604 | /* advertise support but don't enable by default since only one type of | |
3605 | * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one | |
3606 | * type is turned on, the other has to be turned off. This is enforced by the |
3607 | * ice_fix_features() ndo callback. | |
3608 | */ | |
3609 | if (is_dvm_ena) | |
3610 | netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | | |
3611 | NETIF_F_HW_VLAN_STAG_TX; | |
dddd406d JB |
3612 | |
3613 | /* Leave CRC / FCS stripping enabled by default, but allow the value to | |
3614 | * be changed at runtime | |
3615 | */ | |
3616 | netdev->hw_features |= NETIF_F_RXFCS; | |
fce92dbc PC |
3617 | |
3618 | netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); | |
462acf6a TN |
3619 | } |
3620 | ||
d76a60ba AV |
3621 | /** |
3622 | * ice_fill_rss_lut - Fill the RSS lookup table with default values | |
3623 | * @lut: Lookup table | |
3624 | * @rss_table_size: Lookup table size | |
3625 | * @rss_size: Range of queue number for hashing | |
3626 | */ | |
3627 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) | |
3628 | { | |
3629 | u16 i; | |
3630 | ||
3631 | for (i = 0; i < rss_table_size; i++) | |
3632 | lut[i] = i % rss_size; | |
3633 | } | |
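/* Quick userspace sketch of the default LUT fill above: entries cycle
 * through the queue range, so a 16-entry table over 4 queues repeats
 * 0,1,2,3. Sizes are arbitrary examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned char lut[16];
	unsigned short rss_size = 4;	/* hypothetical queue count */

	for (unsigned short i = 0; i < sizeof(lut); i++)
		lut[i] = i % rss_size;
	for (unsigned short i = 0; i < sizeof(lut); i++)
		printf("%d ", lut[i]);	/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}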
3634 | ||
0f9d5027 AV |
3635 | /** |
3636 | * ice_pf_vsi_setup - Set up a PF VSI | |
3637 | * @pf: board private structure | |
3638 | * @pi: pointer to the port_info instance | |
3639 | * | |
0e674aeb AV |
3640 | * Returns pointer to the successfully allocated VSI software struct |
3641 | * on success, otherwise returns NULL on failure. | |
0f9d5027 AV |
3642 | */ |
3643 | static struct ice_vsi * | |
3644 | ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) | |
3645 | { | |
5e509ab2 JK |
3646 | struct ice_vsi_cfg_params params = {}; |
3647 | ||
3648 | params.type = ICE_VSI_PF; | |
3649 | params.pi = pi; | |
3650 | params.flags = ICE_VSI_FLAG_INIT; | |
3651 | ||
3652 | return ice_vsi_setup(pf, ¶ms); | |
0f9d5027 AV |
3653 | } |
3654 | ||
fbc7b27a KP |
3655 | static struct ice_vsi * |
3656 | ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, | |
3657 | struct ice_channel *ch) | |
3658 | { | |
5e509ab2 JK |
3659 | struct ice_vsi_cfg_params params = {}; |
3660 | ||
3661 | params.type = ICE_VSI_CHNL; | |
3662 | params.pi = pi; | |
3663 | params.ch = ch; | |
3664 | params.flags = ICE_VSI_FLAG_INIT; | |
3665 | ||
3666 | return ice_vsi_setup(pf, ¶ms); | |
fbc7b27a KP |
3667 | } |
3668 | ||
148beb61 HT |
3669 | /** |
3670 | * ice_ctrl_vsi_setup - Set up a control VSI | |
3671 | * @pf: board private structure | |
3672 | * @pi: pointer to the port_info instance | |
3673 | * | |
3674 | * Returns pointer to the successfully allocated VSI software struct | |
3675 | * on success, otherwise returns NULL on failure. | |
3676 | */ | |
3677 | static struct ice_vsi * | |
3678 | ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) | |
3679 | { | |
5e509ab2 JK |
3680 | struct ice_vsi_cfg_params params = {}; |
3681 | ||
3682 | params.type = ICE_VSI_CTRL; | |
3683 | params.pi = pi; | |
3684 | params.flags = ICE_VSI_FLAG_INIT; | |
3685 | ||
3686 | return ice_vsi_setup(pf, ¶ms); | |
148beb61 HT |
3687 | } |
3688 | ||
0e674aeb AV |
3689 | /** |
3690 | * ice_lb_vsi_setup - Set up a loopback VSI | |
3691 | * @pf: board private structure | |
3692 | * @pi: pointer to the port_info instance | |
3693 | * | |
3694 | * Returns pointer to the successfully allocated VSI software struct | |
3695 | * on success, otherwise returns NULL on failure. | |
3696 | */ | |
3697 | struct ice_vsi * | |
3698 | ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) | |
3699 | { | |
5e509ab2 JK |
3700 | struct ice_vsi_cfg_params params = {}; |
3701 | ||
3702 | params.type = ICE_VSI_LB; | |
3703 | params.pi = pi; | |
3704 | params.flags = ICE_VSI_FLAG_INIT; | |
3705 | ||
3706 | return ice_vsi_setup(pf, ¶ms); | |
0e674aeb AV |
3707 | } |
3708 | ||
d76a60ba | 3709 | /** |
f9867df6 | 3710 | * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload |
d76a60ba | 3711 | * @netdev: network interface to be adjusted |
2bfefa2d | 3712 | * @proto: VLAN TPID |
f9867df6 | 3713 | * @vid: VLAN ID to be added |
d76a60ba | 3714 | * |
f9867df6 | 3715 | * net_device_ops implementation for adding VLAN IDs |
d76a60ba | 3716 | */ |
c8b7abdd | 3717 | static int |
2bfefa2d | 3718 | ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) |
d76a60ba AV |
3719 | { |
3720 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
c31af68a | 3721 | struct ice_vsi_vlan_ops *vlan_ops; |
d76a60ba | 3722 | struct ice_vsi *vsi = np->vsi; |
fb05ba12 | 3723 | struct ice_vlan vlan; |
5eda8afd | 3724 | int ret; |
d76a60ba | 3725 | |
42f3efef BC |
3726 | /* VLAN 0 is added by default during load/reset */ |
3727 | if (!vid) | |
3728 | return 0; | |
3729 | ||
1273f895 IV |
3730 | while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) |
3731 | usleep_range(1000, 2000); | |
3732 | ||
3733 | /* Add multicast promisc rule for the VLAN ID to be added if | |
3734 | * all-multicast is currently enabled. | |
3735 | */ | |
3736 | if (vsi->current_netdev_flags & IFF_ALLMULTI) { | |
3737 | ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, | |
3738 | ICE_MCAST_VLAN_PROMISC_BITS, | |
3739 | vid); | |
3740 | if (ret) | |
3741 | goto finish; | |
3742 | } | |
3743 | ||
c31af68a | 3744 | vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); |
4f74dcc1 | 3745 | |
42f3efef BC |
3746 | /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged |
3747 | * packets aren't pruned by the device's internal switch on Rx | |
d76a60ba | 3748 | */ |
2bfefa2d | 3749 | vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); |
c31af68a | 3750 | ret = vlan_ops->add_vlan(vsi, &vlan); |
1273f895 IV |
3751 | if (ret) |
3752 | goto finish; | |
3753 | ||
3754 | /* If all-multicast is currently enabled and this VLAN ID is only one | |
3755 | * besides VLAN-0 we have to update look-up type of multicast promisc | |
3756 | * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. | |
3757 | */ | |
3758 | if ((vsi->current_netdev_flags & IFF_ALLMULTI) && | |
3759 | ice_vsi_num_non_zero_vlans(vsi) == 1) { | |
3760 | ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, | |
3761 | ICE_MCAST_PROMISC_BITS, 0); | |
3762 | ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, | |
3763 | ICE_MCAST_VLAN_PROMISC_BITS, 0); | |
3764 | } | |
3765 | ||
3766 | finish: | |
3767 | clear_bit(ICE_CFG_BUSY, vsi->state); | |
5eda8afd AA |
3768 | |
3769 | return ret; | |
d76a60ba AV |
3770 | } |
3771 | ||
d76a60ba | 3772 | /** |
f9867df6 | 3773 | * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload |
d76a60ba | 3774 | * @netdev: network interface to be adjusted |
2bfefa2d | 3775 | * @proto: VLAN TPID |
f9867df6 | 3776 | * @vid: VLAN ID to be removed |
d76a60ba | 3777 | * |
f9867df6 | 3778 | * net_device_ops implementation for removing VLAN IDs |
d76a60ba | 3779 | */ |
c8b7abdd | 3780 | static int |
2bfefa2d | 3781 | ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) |
d76a60ba AV |
3782 | { |
3783 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
c31af68a | 3784 | struct ice_vsi_vlan_ops *vlan_ops; |
d76a60ba | 3785 | struct ice_vsi *vsi = np->vsi; |
fb05ba12 | 3786 | struct ice_vlan vlan; |
5eda8afd | 3787 | int ret; |
d76a60ba | 3788 | |
42f3efef BC |
3789 | /* don't allow removal of VLAN 0 */ |
3790 | if (!vid) | |
3791 | return 0; | |
3792 | ||
1273f895 IV |
3793 | while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) |
3794 | usleep_range(1000, 2000); | |
3795 | ||
abddafd4 GS |
3796 | ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, |
3797 | ICE_MCAST_VLAN_PROMISC_BITS, vid); | |
3798 | if (ret) { | |
3799 | netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n", | |
3800 | vsi->vsi_num); | |
3801 | vsi->current_netdev_flags |= IFF_ALLMULTI; | |
3802 | } | |
3803 | ||
c31af68a BC |
3804 | vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); |
3805 | ||
bc42afa9 | 3806 | /* Make sure VLAN delete is successful before updating VLAN |
4f74dcc1 | 3807 | * information |
d76a60ba | 3808 | */ |
2bfefa2d | 3809 | vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0); |
c31af68a | 3810 | ret = vlan_ops->del_vlan(vsi, &vlan); |
5eda8afd | 3811 | if (ret) |
1273f895 | 3812 | goto finish; |
d76a60ba | 3813 | |
1273f895 IV |
3814 | /* Remove multicast promisc rule for the removed VLAN ID if |
3815 | * all-multicast is enabled. | |
3816 | */ | |
3817 | if (vsi->current_netdev_flags & IFF_ALLMULTI) | |
3818 | ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, | |
3819 | ICE_MCAST_VLAN_PROMISC_BITS, vid); | |
3820 | ||
3821 | if (!ice_vsi_has_non_zero_vlans(vsi)) { | |
3822 | /* Update look-up type of multicast promisc rule for VLAN 0 | |
3823 | * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when | |
3824 | * all-multicast is enabled and VLAN 0 is the only VLAN rule. | |
3825 | */ | |
3826 | if (vsi->current_netdev_flags & IFF_ALLMULTI) { | |
3827 | ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, | |
3828 | ICE_MCAST_VLAN_PROMISC_BITS, | |
3829 | 0); | |
3830 | ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, | |
3831 | ICE_MCAST_PROMISC_BITS, 0); | |
3832 | } | |
3833 | } | |
3834 | ||
3835 | finish: | |
3836 | clear_bit(ICE_CFG_BUSY, vsi->state); | |
3837 | ||
3838 | return ret; | |
d76a60ba AV |
3839 | } |
3840 | ||
195bb48f MS |
3841 | /** |
3842 | * ice_rep_indr_tc_block_unbind | |
3843 | * @cb_priv: indirection block private data | |
3844 | */ | |
3845 | static void ice_rep_indr_tc_block_unbind(void *cb_priv) | |
3846 | { | |
3847 | struct ice_indr_block_priv *indr_priv = cb_priv; | |
3848 | ||
3849 | list_del(&indr_priv->list); | |
3850 | kfree(indr_priv); | |
3851 | } | |
3852 | ||
3853 | /** | |
3854 | * ice_tc_indir_block_unregister - Unregister TC indirect block notifications | |
3855 | * @vsi: VSI struct which has the netdev | |
3856 | */ | |
3857 | static void ice_tc_indir_block_unregister(struct ice_vsi *vsi) | |
3858 | { | |
3859 | struct ice_netdev_priv *np = netdev_priv(vsi->netdev); | |
3860 | ||
3861 | flow_indr_dev_unregister(ice_indr_setup_tc_cb, np, | |
3862 | ice_rep_indr_tc_block_unbind); | |
3863 | } | |
3864 | ||
195bb48f MS |
3865 | /** |
3866 | * ice_tc_indir_block_register - Register TC indirect block notifications | |
3867 | * @vsi: VSI struct which has the netdev | |
3868 | * | |
3869 | * Returns 0 on success, negative value on failure | |
3870 | */ | |
3871 | static int ice_tc_indir_block_register(struct ice_vsi *vsi) | |
3872 | { | |
3873 | struct ice_netdev_priv *np; | |
3874 | ||
3875 | if (!vsi || !vsi->netdev) | |
3876 | return -EINVAL; | |
3877 | ||
3878 | np = netdev_priv(vsi->netdev); | |
3879 | ||
3880 | INIT_LIST_HEAD(&np->tc_indr_block_priv_list); | |
3881 | return flow_indr_dev_register(ice_indr_setup_tc_cb, np); | |
3882 | } | |
3883 | ||
940b61af | 3884 | /** |
8c243700 AV |
3885 | * ice_get_avail_q_count - Get count of available queues |
3886 | * @pf_qmap: bitmap to count available queues from |
3887 | * @lock: pointer to a mutex that protects access to pf_qmap | |
3888 | * @size: size of the bitmap | |
940b61af | 3889 | */ |
8c243700 AV |
3890 | static u16 |
3891 | ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) | |
940b61af | 3892 | { |
88865fc4 KK |
3893 | unsigned long bit; |
3894 | u16 count = 0; | |
940b61af | 3895 | |
8c243700 AV |
3896 | mutex_lock(lock); |
3897 | for_each_clear_bit(bit, pf_qmap, size) | |
3898 | count++; | |
3899 | mutex_unlock(lock); | |
940b61af | 3900 | |
8c243700 AV |
3901 | return count; |
3902 | } | |
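/* Userspace sketch of the counting helper above: a clear bit in the
 * availability map is a free queue, so counting clear bits yields the
 * available-queue count. The 16-bit map is a toy stand-in for the PF's
 * bitmap.
 */
#include <stdio.h>

int main(void)
{
	unsigned short qmap = 0x00ff;	/* low 8 queue slots taken */
	unsigned int count = 0;

	for (unsigned int bit = 0; bit < 16; bit++)
		if (!(qmap & (1u << bit)))
			count++;	/* for_each_clear_bit() analogue */
	printf("available queues: %u\n", count);	/* prints 8 */
	return 0;
}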
d76a60ba | 3903 | |
8c243700 AV |
3904 | /** |
3905 | * ice_get_avail_txq_count - Get count of available Tx queues |
3906 | * @pf: pointer to an ice_pf instance | |
3907 | */ | |
3908 | u16 ice_get_avail_txq_count(struct ice_pf *pf) | |
3909 | { | |
3910 | return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, | |
3911 | pf->max_pf_txqs); | |
3912 | } | |
940b61af | 3913 | |
8c243700 AV |
3914 | /** |
3915 | * ice_get_avail_rxq_count - Get count of available Rx queues |
3916 | * @pf: pointer to an ice_pf instance | |
3917 | */ | |
3918 | u16 ice_get_avail_rxq_count(struct ice_pf *pf) | |
3919 | { | |
3920 | return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, | |
3921 | pf->max_pf_rxqs); | |
940b61af AV |
3922 | } |
3923 | ||
3924 | /** | |
3925 | * ice_deinit_pf - Unrolls initializations done by ice_init_pf |
3926 | * @pf: board private structure to initialize | |
3927 | */ | |
3928 | static void ice_deinit_pf(struct ice_pf *pf) | |
3929 | { | |
8d81fa55 | 3930 | ice_service_task_stop(pf); |
bb52f42a | 3931 | mutex_destroy(&pf->lag_mutex); |
486b9eee | 3932 | mutex_destroy(&pf->adev_mutex); |
940b61af | 3933 | mutex_destroy(&pf->sw_mutex); |
b94b013e | 3934 | mutex_destroy(&pf->tc_mutex); |
940b61af | 3935 | mutex_destroy(&pf->avail_q_mutex); |
3d5985a1 | 3936 | mutex_destroy(&pf->vfs.table_lock); |
78b5713a AV |
3937 | |
3938 | if (pf->avail_txqs) { | |
3939 | bitmap_free(pf->avail_txqs); | |
3940 | pf->avail_txqs = NULL; | |
3941 | } | |
3942 | ||
3943 | if (pf->avail_rxqs) { | |
3944 | bitmap_free(pf->avail_rxqs); | |
3945 | pf->avail_rxqs = NULL; | |
3946 | } | |
06c16d89 JK |
3947 | |
3948 | if (pf->ptp.clock) | |
3949 | ptp_clock_unregister(pf->ptp.clock); | |
940b61af AV |
3950 | } |
3951 | ||
3952 | /** | |
462acf6a TN |
3953 | * ice_set_pf_caps - set PF's capability flags |
3954 | * @pf: pointer to the PF instance | |
940b61af | 3955 | */ |
462acf6a | 3956 | static void ice_set_pf_caps(struct ice_pf *pf) |
940b61af | 3957 | { |
462acf6a TN |
3958 | struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; |
3959 | ||
d25a0fc4 | 3960 | clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
88f62aea | 3961 | if (func_caps->common_cap.rdma) |
d25a0fc4 | 3962 | set_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
462acf6a TN |
3963 | clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
3964 | if (func_caps->common_cap.dcb) | |
80739b57 | 3965 | set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
462acf6a TN |
3966 | clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
3967 | if (func_caps->common_cap.sr_iov_1_1) { | |
75d2b253 | 3968 | set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
000773c0 | 3969 | pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, |
dc36796e | 3970 | ICE_MAX_SRIOV_VFS); |
75d2b253 | 3971 | } |
462acf6a TN |
3972 | clear_bit(ICE_FLAG_RSS_ENA, pf->flags); |
3973 | if (func_caps->common_cap.rss_table_size) | |
3974 | set_bit(ICE_FLAG_RSS_ENA, pf->flags); | |
940b61af | 3975 | |
148beb61 HT |
3976 | clear_bit(ICE_FLAG_FD_ENA, pf->flags); |
3977 | if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { | |
3978 | u16 unused; | |
3979 | ||
3980 | /* ctrl_vsi_idx will be set to a valid value when flow director | |
3981 | * is setup by ice_init_fdir | |
3982 | */ | |
3983 | pf->ctrl_vsi_idx = ICE_NO_VSI; | |
3984 | set_bit(ICE_FLAG_FD_ENA, pf->flags); | |
3985 | /* force guaranteed filter pool for PF */ | |
3986 | ice_alloc_fd_guar_item(&pf->hw, &unused, | |
3987 | func_caps->fd_fltr_guar); | |
3988 | /* force shared filter pool for PF */ | |
3989 | ice_alloc_fd_shrd_item(&pf->hw, &unused, | |
3990 | func_caps->fd_fltr_best_effort); | |
3991 | } | |
3992 | ||
06c16d89 | 3993 | clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); |
ba1124f5 PG |
3994 | if (func_caps->common_cap.ieee_1588 && |
3995 | !(pf->hw.mac_type == ICE_MAC_E830)) | |
06c16d89 JK |
3996 | set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); |
3997 | ||
462acf6a TN |
3998 | pf->max_pf_txqs = func_caps->common_cap.num_txq; |
3999 | pf->max_pf_rxqs = func_caps->common_cap.num_rxq; | |
4000 | } | |
940b61af | 4001 | |
462acf6a TN |
4002 | /** |
4003 | * ice_init_pf - Initialize general software structures (struct ice_pf) | |
4004 | * @pf: board private structure to initialize | |
4005 | */ | |
4006 | static int ice_init_pf(struct ice_pf *pf) | |
4007 | { | |
4008 | ice_set_pf_caps(pf); | |
4009 | ||
4010 | mutex_init(&pf->sw_mutex); | |
b94b013e | 4011 | mutex_init(&pf->tc_mutex); |
486b9eee | 4012 | mutex_init(&pf->adev_mutex); |
bb52f42a | 4013 | mutex_init(&pf->lag_mutex); |
d76a60ba | 4014 | |
d69ea414 JK |
4015 | INIT_HLIST_HEAD(&pf->aq_wait_list); |
4016 | spin_lock_init(&pf->aq_wait_lock); | |
4017 | init_waitqueue_head(&pf->aq_wait_queue); | |
4018 | ||
1c08052e JK |
4019 | init_waitqueue_head(&pf->reset_wait_queue); |
4020 | ||
940b61af AV |
4021 | /* setup service timer and periodic service task */ |
4022 | timer_setup(&pf->serv_tmr, ice_service_timer, 0); | |
4023 | pf->serv_tmr_period = HZ; | |
4024 | INIT_WORK(&pf->serv_task, ice_service_task); | |
7e408e07 | 4025 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
78b5713a | 4026 | |
462acf6a | 4027 | mutex_init(&pf->avail_q_mutex); |
78b5713a AV |
4028 | pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); |
4029 | if (!pf->avail_txqs) | |
4030 | return -ENOMEM; | |
4031 | ||
4032 | pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); | |
4033 | if (!pf->avail_rxqs) { | |
59ac3255 | 4034 | bitmap_free(pf->avail_txqs); |
78b5713a AV |
4035 | pf->avail_txqs = NULL; |
4036 | return -ENOMEM; | |
4037 | } | |
4038 | ||
3d5985a1 JK |
4039 | mutex_init(&pf->vfs.table_lock); |
4040 | hash_init(pf->vfs.table); | |
dde7db63 | 4041 | ice_mbx_init_snapshot(&pf->hw); |
3d5985a1 | 4042 | |
78b5713a | 4043 | return 0; |
940b61af AV |
4044 | } |
4045 | ||
769c500d | 4046 | /** |
31765519 AV |
4047 | * ice_is_wol_supported - check if WoL is supported |
4048 | * @hw: pointer to hardware info | |
769c500d AA |
4049 | * |
4050 | * Check if WoL is supported based on the HW configuration. | |
4051 | * Returns true if NVM supports and enables WoL for this port, false otherwise | |
4052 | */ | |
31765519 | 4053 | bool ice_is_wol_supported(struct ice_hw *hw) |
769c500d | 4054 | { |
769c500d AA |
4055 | u16 wol_ctrl; |
4056 | ||
4057 | /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control | |
4058 | * word) indicates WoL is not supported on the corresponding PF ID. | |
4059 | */ | |
4060 | if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) | |
4061 | return false; | |
4062 | ||
31765519 | 4063 | return !(BIT(hw->port_info->lport) & wol_ctrl); |
769c500d AA |
4064 | } |
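/* Sketch of the NVM WoL check above: a set bit in the control word means
 * WoL is NOT supported on that port, hence the negation. The word value
 * below is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned short wol_ctrl = 0x0005;	/* hypothetical: ports 0 and 2 set */

	for (unsigned int lport = 0; lport < 4; lport++)
		printf("port %u: WoL %ssupported\n", lport,
		       (wol_ctrl & (1u << lport)) ? "not " : "");
	return 0;
}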
4065 | ||
87324e74 HT |
4066 | /** |
4067 | * ice_vsi_recfg_qs - Change the number of queues on a VSI | |
4068 | * @vsi: VSI being changed | |
4069 | * @new_rx: new number of Rx queues | |
4070 | * @new_tx: new number of Tx queues | |
a6a0974a | 4071 | * @locked: is adev device_lock held |
87324e74 HT |
4072 | * |
4073 | * Only change the number of queues if new_tx or new_rx is non-zero. |
4074 | * | |
4075 | * Returns 0 on success. | |
4076 | */ | |
a6a0974a | 4077 | int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) |
87324e74 HT |
4078 | { |
4079 | struct ice_pf *pf = vsi->back; | |
4080 | int err = 0, timeout = 50; | |
4081 | ||
4082 | if (!new_rx && !new_tx) | |
4083 | return -EINVAL; | |
4084 | ||
7e408e07 | 4085 | while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { |
87324e74 HT |
4086 | timeout--; |
4087 | if (!timeout) | |
4088 | return -EBUSY; | |
4089 | usleep_range(1000, 2000); | |
4090 | } | |
4091 | ||
4092 | if (new_tx) | |
88865fc4 | 4093 | vsi->req_txq = (u16)new_tx; |
87324e74 | 4094 | if (new_rx) |
88865fc4 | 4095 | vsi->req_rxq = (u16)new_rx; |
87324e74 HT |
4096 | |
4097 | /* set for the next time the netdev is started */ | |
4098 | if (!netif_running(vsi->netdev)) { | |
6624e780 | 4099 | ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); |
87324e74 HT |
4100 | dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); |
4101 | goto done; | |
4102 | } | |
4103 | ||
4104 | ice_vsi_close(vsi); | |
6624e780 | 4105 | ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); |
a6a0974a | 4106 | ice_pf_dcb_recfg(pf, locked); |
87324e74 HT |
4107 | ice_vsi_open(vsi); |
4108 | done: | |
7e408e07 | 4109 | clear_bit(ICE_CFG_BUSY, pf->state); |
87324e74 HT |
4110 | return err; |
4111 | } | |
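/* Userspace sketch of the ICE_CFG_BUSY retry pattern above: spin on a
 * busy flag with a bounded number of 1-2 ms sleeps before giving up.
 * C11 atomics stand in for the kernel's test_and_set_bit()/clear_bit().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_flag cfg_busy = ATOMIC_FLAG_INIT;

static int try_acquire_cfg(void)
{
	int timeout = 50;

	while (atomic_flag_test_and_set(&cfg_busy)) {
		if (!--timeout)
			return -1;	/* -EBUSY in the driver */
		usleep(1500);		/* usleep_range(1000, 2000) analogue */
	}
	return 0;
}

int main(void)
{
	if (!try_acquire_cfg()) {
		puts("config section acquired");
		atomic_flag_clear(&cfg_busy);	/* clear_bit() analogue */
	}
	return 0;
}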
4112 | ||
cd1f56f4 BC |
4113 | /** |
4114 | * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode | |
4115 | * @pf: PF to configure | |
4116 | * | |
4117 | * No VLAN offloads/filtering are advertised in safe mode so make sure the PF | |
4118 | * VSI can still Tx/Rx VLAN tagged packets. | |
4119 | */ | |
4120 | static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) | |
4121 | { | |
4122 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
4123 | struct ice_vsi_ctx *ctxt; | |
cd1f56f4 | 4124 | struct ice_hw *hw; |
5518ac2a | 4125 | int status; |
cd1f56f4 BC |
4126 | |
4127 | if (!vsi) | |
4128 | return; | |
4129 | ||
4130 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); | |
4131 | if (!ctxt) | |
4132 | return; | |
4133 | ||
4134 | hw = &pf->hw; | |
4135 | ctxt->info = vsi->info; | |
4136 | ||
4137 | ctxt->info.valid_sections = | |
4138 | cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | | |
4139 | ICE_AQ_VSI_PROP_SECURITY_VALID | | |
4140 | ICE_AQ_VSI_PROP_SW_VALID); | |
4141 | ||
4142 | /* disable VLAN anti-spoof */ | |
4143 | ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << | |
4144 | ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); | |
4145 | ||
4146 | /* disable VLAN pruning and keep all other settings */ | |
4147 | ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; | |
4148 | ||
4149 | /* allow all VLANs on Tx and don't strip on Rx */ | |
7bd527aa BC |
4150 | ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | |
4151 | ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING; | |
cd1f56f4 BC |
4152 | |
4153 | status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); | |
4154 | if (status) { | |
5f87ec48 | 4155 | dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", |
5518ac2a | 4156 | status, ice_aq_str(hw->adminq.sq_last_status)); |
cd1f56f4 BC |
4157 | } else { |
4158 | vsi->info.sec_flags = ctxt->info.sec_flags; | |
4159 | vsi->info.sw_flags2 = ctxt->info.sw_flags2; | |
7bd527aa | 4160 | vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; |
cd1f56f4 BC |
4161 | } |
4162 | ||
4163 | kfree(ctxt); | |
4164 | } | |
4165 | ||
462acf6a TN |
4166 | /** |
4167 | * ice_log_pkg_init - log result of DDP package load | |
4168 | * @hw: pointer to hardware info | |
247dd97d | 4169 | * @state: state of package load |
462acf6a | 4170 | */ |
247dd97d | 4171 | static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state) |
462acf6a | 4172 | { |
247dd97d WD |
4173 | struct ice_pf *pf = hw->back; |
4174 | struct device *dev; | |
462acf6a | 4175 | |
247dd97d WD |
4176 | dev = ice_pf_to_dev(pf); |
4177 | ||
4178 | switch (state) { | |
4179 | case ICE_DDP_PKG_SUCCESS: | |
4180 | dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", | |
4181 | hw->active_pkg_name, | |
4182 | hw->active_pkg_ver.major, | |
4183 | hw->active_pkg_ver.minor, | |
4184 | hw->active_pkg_ver.update, | |
4185 | hw->active_pkg_ver.draft); | |
4186 | break; | |
4187 | case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: | |
4188 | dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", | |
4189 | hw->active_pkg_name, | |
4190 | hw->active_pkg_ver.major, | |
4191 | hw->active_pkg_ver.minor, | |
4192 | hw->active_pkg_ver.update, | |
4193 | hw->active_pkg_ver.draft); | |
4194 | break; | |
4195 | case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: | |
4196 | dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", | |
4197 | hw->active_pkg_name, | |
4198 | hw->active_pkg_ver.major, | |
4199 | hw->active_pkg_ver.minor, | |
4200 | ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); | |
4201 | break; | |
4202 | case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: | |
4203 | dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", | |
4204 | hw->active_pkg_name, | |
4205 | hw->active_pkg_ver.major, | |
4206 | hw->active_pkg_ver.minor, | |
4207 | hw->active_pkg_ver.update, | |
4208 | hw->active_pkg_ver.draft, | |
4209 | hw->pkg_name, | |
4210 | hw->pkg_ver.major, | |
4211 | hw->pkg_ver.minor, | |
4212 | hw->pkg_ver.update, | |
4213 | hw->pkg_ver.draft); | |
462acf6a | 4214 | break; |
247dd97d | 4215 | case ICE_DDP_PKG_FW_MISMATCH: |
b8272919 VR |
4216 | dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering Safe Mode.\n"); | |
4217 | break; | |
247dd97d | 4218 | case ICE_DDP_PKG_INVALID_FILE: |
19cce2c6 | 4219 | dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); |
462acf6a | 4220 | break; |
247dd97d WD |
4221 | case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: |
4222 | dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); | |
462acf6a | 4223 | break; |
247dd97d WD |
4224 | case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: |
4225 | dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", | |
4226 | ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); | |
4227 | break; | |
4228 | case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: | |
4229 | dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); | |
4230 | break; | |
4231 | case ICE_DDP_PKG_FILE_REVISION_TOO_LOW: | |
4232 | dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); | |
4233 | break; | |
4234 | case ICE_DDP_PKG_LOAD_ERROR: | |
4235 | dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); | |
0092db5f JB |
4236 | /* poll for reset to complete */ |
4237 | if (ice_check_reset(hw)) | |
4238 | dev_err(dev, "Error resetting device. Please reload the driver\n"); | |
462acf6a | 4239 | break; |
247dd97d WD |
4240 | case ICE_DDP_PKG_ERR: |
4241 | default: | |
4242 | dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); | |
0092db5f | 4243 | break; |
462acf6a TN |
4244 | } |
4245 | } | |
4246 | ||
4247 | /** | |
4248 | * ice_load_pkg - load/reload the DDP Package file | |
4249 | * @firmware: firmware structure when firmware requested or NULL for reload | |
4250 | * @pf: pointer to the PF instance | |
4251 | * | |
4252 | * Called on probe and post CORER/GLOBR rebuild to load DDP Package and | |
4253 | * initialize HW tables. | |
4254 | */ | |
4255 | static void | |
4256 | ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) | |
4257 | { | |
247dd97d | 4258 | enum ice_ddp_state state = ICE_DDP_PKG_ERR; |
4015d11e | 4259 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a TN |
4260 | struct ice_hw *hw = &pf->hw; |
4261 | ||
4262 | /* Load DDP Package */ | |
4263 | if (firmware && !hw->pkg_copy) { | |
247dd97d WD |
4264 | state = ice_copy_and_init_pkg(hw, firmware->data, |
4265 | firmware->size); | |
4266 | ice_log_pkg_init(hw, state); | |
462acf6a TN |
4267 | } else if (!firmware && hw->pkg_copy) { |
4268 | /* Reload package during rebuild after CORER/GLOBR reset */ | |
247dd97d WD |
4269 | state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); |
4270 | ice_log_pkg_init(hw, state); | |
462acf6a | 4271 | } else { |
19cce2c6 | 4272 | dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); |
462acf6a TN |
4273 | } |
4274 | ||
247dd97d | 4275 | if (!ice_is_init_pkg_successful(state)) { |
462acf6a TN |
4276 | /* Safe Mode */ |
4277 | clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); | |
4278 | return; | |
4279 | } | |
4280 | ||
4281 | /* Successful download package is the precondition for advanced | |
4282 | * features, hence setting the ICE_FLAG_ADV_FEATURES flag | |
4283 | */ | |
4284 | set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); | |
4285 | } | |
4286 | ||
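/* ICE_FLAG_ADV_FEATURES is the gate for everything that depends on a loaded
 * DDP package, and ice_is_safe_mode() reads it. A minimal (hypothetical)
 * sketch of how a DDP-driven feature path checks the gate, mirroring
 * ice_init_features() further below:
 *
 *	if (ice_is_safe_mode(pf))
 *		return;
 *	set_up_ddp_driven_feature(pf);
 *
 * where set_up_ddp_driven_feature() stands in for any advanced-feature init.
 */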
c585ea42 BC |
4287 | /** |
4288 | * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines | |
4289 | * @pf: pointer to the PF structure | |
4290 | * | |
4291 | * There is no error returned here because the driver should be able to handle | |
4292 | * 128 Byte cache lines, so we only print a warning in case issues are seen, | |
4293 | * specifically with Tx. | |
4294 | */ | |
4295 | static void ice_verify_cacheline_size(struct ice_pf *pf) | |
4296 | { | |
4297 | if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) | |
19cce2c6 | 4298 | dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", |
c585ea42 BC |
4299 | ICE_CACHE_LINE_BYTES); |
4300 | } | |
4301 | ||
e3710a01 PSJ |
4302 | /** |
4303 | * ice_send_version - update firmware with driver version | |
4304 | * @pf: PF struct | |
4305 | * | |
d54699e2 | 4306 | * Returns 0 on success, else error code |
e3710a01 | 4307 | */ |
5e24d598 | 4308 | static int ice_send_version(struct ice_pf *pf) |
e3710a01 PSJ |
4309 | { |
4310 | struct ice_driver_ver dv; | |
4311 | ||
34a2a3b8 JK |
4312 | dv.major_ver = 0xff; |
4313 | dv.minor_ver = 0xff; | |
4314 | dv.build_ver = 0xff; | |
e3710a01 | 4315 | dv.subbuild_ver = 0; |
34a2a3b8 | 4316 | strscpy((char *)dv.driver_string, UTS_RELEASE, |
e3710a01 PSJ |
4317 | sizeof(dv.driver_string)); |
4318 | return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); | |
4319 | } | |
4320 | ||
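/* Note on the fields above: the numeric version fields are filled with 0xff
 * sentinels and the real identification travels in driver_string via
 * UTS_RELEASE; how firmware interprets the sentinel values is not spelled
 * out here and is left as an assumption.
 */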
148beb61 HT |
4321 | /** |
4322 | * ice_init_fdir - Initialize flow director VSI and configuration | |
4323 | * @pf: pointer to the PF instance | |
4324 | * | |
4325 | * returns 0 on success, negative on error | |
4326 | */ | |
4327 | static int ice_init_fdir(struct ice_pf *pf) | |
4328 | { | |
4329 | struct device *dev = ice_pf_to_dev(pf); | |
4330 | struct ice_vsi *ctrl_vsi; | |
4331 | int err; | |
4332 | ||
4333 | /* Side Band Flow Director needs to have a control VSI. | |
4334 | * Allocate it and store it in the PF. | |
4335 | */ | |
4336 | ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); | |
4337 | if (!ctrl_vsi) { | |
4338 | dev_dbg(dev, "could not create control VSI\n"); | |
4339 | return -ENOMEM; | |
4340 | } | |
4341 | ||
4342 | err = ice_vsi_open_ctrl(ctrl_vsi); | |
4343 | if (err) { | |
4344 | dev_dbg(dev, "could not open control VSI\n"); | |
4345 | goto err_vsi_open; | |
4346 | } | |
4347 | ||
4348 | mutex_init(&pf->hw.fdir_fltr_lock); | |
4349 | ||
4350 | err = ice_fdir_create_dflt_rules(pf); | |
4351 | if (err) | |
4352 | goto err_fdir_rule; | |
4353 | ||
4354 | return 0; | |
4355 | ||
4356 | err_fdir_rule: | |
4357 | ice_fdir_release_flows(&pf->hw); | |
4358 | ice_vsi_close(ctrl_vsi); | |
4359 | err_vsi_open: | |
4360 | ice_vsi_release(ctrl_vsi); | |
4361 | if (pf->ctrl_vsi_idx != ICE_NO_VSI) { | |
4362 | pf->vsi[pf->ctrl_vsi_idx] = NULL; | |
4363 | pf->ctrl_vsi_idx = ICE_NO_VSI; | |
4364 | } | |
4365 | return err; | |
4366 | } | |
4367 | ||
5b246e53 MS |
4368 | static void ice_deinit_fdir(struct ice_pf *pf) |
4369 | { | |
4370 | struct ice_vsi *vsi = ice_get_ctrl_vsi(pf); | |
4371 | ||
4372 | if (!vsi) | |
4373 | return; | |
4374 | ||
4375 | ice_vsi_manage_fdir(vsi, false); | |
4376 | ice_vsi_release(vsi); | |
4377 | if (pf->ctrl_vsi_idx != ICE_NO_VSI) { | |
4378 | pf->vsi[pf->ctrl_vsi_idx] = NULL; | |
4379 | pf->ctrl_vsi_idx = ICE_NO_VSI; | |
4380 | } | |
4381 | ||
4382 | mutex_destroy(&pf->hw.fdir_fltr_lock); | |
4383 | } | |
4384 | ||
462acf6a TN |
4385 | /** |
4386 | * ice_get_opt_fw_name - return optional firmware file name or NULL | |
4387 | * @pf: pointer to the PF instance | |
4388 | */ | |
4389 | static char *ice_get_opt_fw_name(struct ice_pf *pf) | |
4390 | { | |
4391 | /* Optional firmware name: same as the default, with an additional dash | |
4392 | * followed by an EUI-64 identifier (PCIe Device Serial Number) | |
4393 | */ | |
4394 | struct pci_dev *pdev = pf->pdev; | |
ceb2f007 JK |
4395 | char *opt_fw_filename; |
4396 | u64 dsn; | |
462acf6a TN |
4397 | |
4398 | /* Determine the name of the optional file using the DSN (two | |
4399 | * dwords following the start of the DSN Capability). | |
4400 | */ | |
ceb2f007 JK |
4401 | dsn = pci_get_dsn(pdev); |
4402 | if (!dsn) | |
4403 | return NULL; | |
4404 | ||
4405 | opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); | |
4406 | if (!opt_fw_filename) | |
4407 | return NULL; | |
4408 | ||
1a9c561a | 4409 | snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", |
ceb2f007 | 4410 | ICE_DDP_PKG_PATH, dsn); |
462acf6a TN |
4411 | |
4412 | return opt_fw_filename; | |
4413 | } | |
4414 | ||
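/* For a hypothetical DSN of 0x0123456789abcdef, the "%sice-%016llx.pkg"
 * format above yields "intel/ice/ddp/ice-0123456789abcdef.pkg", which the
 * firmware loader resolves under its usual search paths.
 */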
4415 | /** | |
4416 | * ice_request_fw - Device initialization routine | |
4417 | * @pf: pointer to the PF instance | |
4418 | */ | |
4419 | static void ice_request_fw(struct ice_pf *pf) | |
4420 | { | |
4421 | char *opt_fw_filename = ice_get_opt_fw_name(pf); | |
4422 | const struct firmware *firmware = NULL; | |
4015d11e | 4423 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a TN |
4424 | int err = 0; |
4425 | ||
4426 | /* An optional device-specific DDP package (if present) overrides the | |
4427 | * default DDP package file. The kernel logs a debug message if the file | |
4428 | * doesn't exist, and warning messages for other errors. | |
4429 | */ | |
4430 | if (opt_fw_filename) { | |
4431 | err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); | |
4432 | if (err) { | |
4433 | kfree(opt_fw_filename); | |
4434 | goto dflt_pkg_load; | |
4435 | } | |
4436 | ||
4437 | /* request for firmware was successful. Download to device */ | |
4438 | ice_load_pkg(firmware, pf); | |
4439 | kfree(opt_fw_filename); | |
4440 | release_firmware(firmware); | |
4441 | return; | |
4442 | } | |
4443 | ||
4444 | dflt_pkg_load: | |
4445 | err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); | |
4446 | if (err) { | |
19cce2c6 | 4447 | dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); |
462acf6a TN |
4448 | return; |
4449 | } | |
4450 | ||
4451 | /* request for firmware was successful. Download to device */ | |
4452 | ice_load_pkg(firmware, pf); | |
4453 | release_firmware(firmware); | |
4454 | } | |
4455 | ||
769c500d AA |
4456 | /** |
4457 | * ice_print_wake_reason - show the wake up cause in the log | |
4458 | * @pf: pointer to the PF struct | |
4459 | */ | |
4460 | static void ice_print_wake_reason(struct ice_pf *pf) | |
4461 | { | |
4462 | u32 wus = pf->wakeup_reason; | |
4463 | const char *wake_str; | |
4464 | ||
4465 | /* if no wake event, nothing to print */ | |
4466 | if (!wus) | |
4467 | return; | |
4468 | ||
4469 | if (wus & PFPM_WUS_LNKC_M) | |
4470 | wake_str = "Link\n"; | |
4471 | else if (wus & PFPM_WUS_MAG_M) | |
4472 | wake_str = "Magic Packet\n"; | |
4473 | else if (wus & PFPM_WUS_MNG_M) | |
4474 | wake_str = "Management\n"; | |
4475 | else if (wus & PFPM_WUS_FW_RST_WK_M) | |
4476 | wake_str = "Firmware Reset\n"; | |
4477 | else | |
4478 | wake_str = "Unknown\n"; | |
4479 | ||
4480 | dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); | |
4481 | } | |
4482 | ||
96a9a934 PSJ |
4483 | /** |
4484 | * ice_pf_fwlog_update_module - update 1 module | |
4485 | * @pf: pointer to the PF struct | |
4486 | * @log_level: log_level to use for the @module | |
4487 | * @module: module to update | |
4488 | */ | |
4489 | void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module) | |
4490 | { | |
4491 | struct ice_hw *hw = &pf->hw; | |
4492 | ||
4493 | hw->fwlog_cfg.module_entries[module].log_level = log_level; | |
4494 | } | |
4495 | ||
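/* Usage sketch (hypothetical caller): a handler that raises the log level of
 * one firmware module would call
 *
 *	ice_pf_fwlog_update_module(pf, log_level, module);
 *
 * Note this only updates the cached hw->fwlog_cfg entry; pushing the
 * configuration to firmware happens elsewhere.
 */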
1e23f076 | 4496 | /** |
418e5340 | 4497 | * ice_register_netdev - register netdev |
5b246e53 | 4498 | * @vsi: pointer to the VSI struct |
1e23f076 | 4499 | */ |
5b246e53 | 4500 | static int ice_register_netdev(struct ice_vsi *vsi) |
1e23f076 | 4501 | { |
5b246e53 | 4502 | int err; |
1e23f076 | 4503 | |
1e23f076 AV |
4504 | if (!vsi || !vsi->netdev) |
4505 | return -EIO; | |
4506 | ||
4507 | err = register_netdev(vsi->netdev); | |
4508 | if (err) | |
5b246e53 | 4509 | return err; |
1e23f076 | 4510 | |
a476d72a | 4511 | set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); |
1e23f076 AV |
4512 | netif_carrier_off(vsi->netdev); |
4513 | netif_tx_stop_all_queues(vsi->netdev); | |
1e23f076 | 4514 | |
1e23f076 | 4515 | return 0; |
5b246e53 MS |
4516 | } |
4517 | ||
4518 | static void ice_unregister_netdev(struct ice_vsi *vsi) | |
4519 | { | |
4520 | if (!vsi || !vsi->netdev) | |
4521 | return; | |
4522 | ||
4523 | unregister_netdev(vsi->netdev); | |
4524 | clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); | |
1e23f076 AV |
4525 | } |
4526 | ||
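/* The ICE_VSI_NETDEV_REGISTERED bit maintained above lets teardown code tell
 * an allocated-but-unregistered netdev from a live one, e.g. (hypothetical
 * sketch):
 *
 *	if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state))
 *		unregister_netdev(vsi->netdev);
 */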
837f08fd | 4527 | /** |
5b246e53 MS |
4528 | * ice_cfg_netdev - Allocate, configure and register a netdev |
4529 | * @vsi: the VSI associated with the new netdev | |
837f08fd | 4530 | * |
5b246e53 | 4531 | * Returns 0 on success, negative value on failure |
837f08fd | 4532 | */ |
5b246e53 | 4533 | static int ice_cfg_netdev(struct ice_vsi *vsi) |
837f08fd | 4534 | { |
5b246e53 MS |
4535 | struct ice_netdev_priv *np; |
4536 | struct net_device *netdev; | |
4537 | u8 mac_addr[ETH_ALEN]; | |
837f08fd | 4538 | |
5b246e53 MS |
4539 | netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, |
4540 | vsi->alloc_rxq); | |
4541 | if (!netdev) | |
4542 | return -ENOMEM; | |
4543 | ||
4544 | set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); | |
4545 | vsi->netdev = netdev; | |
4546 | np = netdev_priv(netdev); | |
4547 | np->vsi = vsi; | |
4548 | ||
4549 | ice_set_netdev_features(netdev); | |
b6a4103c | 4550 | ice_set_ops(vsi); |
5b246e53 MS |
4551 | |
4552 | if (vsi->type == ICE_VSI_PF) { | |
4553 | SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); | |
4554 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); | |
4555 | eth_hw_addr_set(netdev, mac_addr); | |
50ac7479 AV |
4556 | } |
4557 | ||
5b246e53 MS |
4558 | netdev->priv_flags |= IFF_UNICAST_FLT; |
4559 | ||
4560 | /* Setup netdev TC information */ | |
4561 | ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); | |
4562 | ||
4563 | netdev->max_mtu = ICE_MAX_MTU; | |
4564 | ||
4565 | return 0; | |
4566 | } | |
4567 | ||
4568 | static void ice_decfg_netdev(struct ice_vsi *vsi) | |
4569 | { | |
4570 | clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); | |
4571 | free_netdev(vsi->netdev); | |
4572 | vsi->netdev = NULL; | |
4573 | } | |
4574 | ||
4575 | static int ice_start_eth(struct ice_vsi *vsi) | |
4576 | { | |
4577 | int err; | |
4578 | ||
4579 | err = ice_init_mac_fltr(vsi->back); | |
837f08fd AV |
4580 | if (err) |
4581 | return err; | |
4582 | ||
5b246e53 | 4583 | err = ice_vsi_open(vsi); |
b3e7b3a6 MS |
4584 | if (err) |
4585 | ice_fltr_remove_all(vsi); | |
837f08fd | 4586 | |
5b246e53 MS |
4587 | return err; |
4588 | } | |
837f08fd | 4589 | |
7d46c0e6 MS |
4590 | static void ice_stop_eth(struct ice_vsi *vsi) |
4591 | { | |
4592 | ice_fltr_remove_all(vsi); | |
4593 | ice_vsi_close(vsi); | |
4594 | } | |
4595 | ||
5b246e53 MS |
4596 | static int ice_init_eth(struct ice_pf *pf) |
4597 | { | |
4598 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
4599 | int err; | |
73e30a62 | 4600 | |
5b246e53 MS |
4601 | if (!vsi) |
4602 | return -EINVAL; | |
837f08fd | 4603 | |
5b246e53 MS |
4604 | /* init channel list */ |
4605 | INIT_LIST_HEAD(&vsi->ch_list); | |
837f08fd | 4606 | |
5b246e53 MS |
4607 | err = ice_cfg_netdev(vsi); |
4608 | if (err) | |
4609 | return err; | |
4610 | /* Setup DCB netlink interface */ | |
4611 | ice_dcbnl_setup(vsi); | |
837f08fd | 4612 | |
5b246e53 MS |
4613 | err = ice_init_mac_fltr(pf); |
4614 | if (err) | |
4615 | goto err_init_mac_fltr; | |
4e56802e | 4616 | |
5b246e53 MS |
4617 | err = ice_devlink_create_pf_port(pf); |
4618 | if (err) | |
4619 | goto err_devlink_create_pf_port; | |
f31e4b6f | 4620 | |
5b246e53 | 4621 | SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); |
837f08fd | 4622 | |
5b246e53 MS |
4623 | err = ice_register_netdev(vsi); |
4624 | if (err) | |
4625 | goto err_register_netdev; | |
7ec59eea | 4626 | |
5b246e53 MS |
4627 | err = ice_tc_indir_block_register(vsi); |
4628 | if (err) | |
4629 | goto err_tc_indir_block_register; | |
f31e4b6f | 4630 | |
5b246e53 | 4631 | ice_napi_add(vsi); |
40b24760 | 4632 | |
5b246e53 MS |
4633 | return 0; |
4634 | ||
4635 | err_tc_indir_block_register: | |
4636 | ice_unregister_netdev(vsi); | |
4637 | err_register_netdev: | |
4638 | ice_devlink_destroy_pf_port(pf); | |
4639 | err_devlink_create_pf_port: | |
4640 | err_init_mac_fltr: | |
4641 | ice_decfg_netdev(vsi); | |
4642 | return err; | |
4643 | } | |
4644 | ||
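/* ice_init_eth() above uses the conventional goto-unwind ladder: each label
 * undoes exactly the steps that had succeeded before the failure, in reverse
 * order, so a failure in ice_tc_indir_block_register() unwinds the netdev
 * registration and the devlink port but never touches state that was never
 * set up.
 */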
4645 | static void ice_deinit_eth(struct ice_pf *pf) | |
4646 | { | |
4647 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
4648 | ||
4649 | if (!vsi) | |
4650 | return; | |
4651 | ||
4652 | ice_vsi_close(vsi); | |
4653 | ice_unregister_netdev(vsi); | |
4654 | ice_devlink_destroy_pf_port(pf); | |
4655 | ice_tc_indir_block_unregister(vsi); | |
4656 | ice_decfg_netdev(vsi); | |
4657 | } | |
4658 | ||
5708155d JS |
4659 | /** |
4660 | * ice_wait_for_fw - wait for full FW readiness | |
4661 | * @hw: pointer to the hardware structure | |
4662 | * @timeout: milliseconds that can elapse before timing out | |
4663 | */ | |
4664 | static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) | |
4665 | { | |
4666 | int fw_loading; | |
4667 | u32 elapsed = 0; | |
4668 | ||
4669 | while (elapsed <= timeout) { | |
4670 | fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; | |
4671 | ||
4672 | /* firmware is not loaded yet, so keep waiting */ | |
4673 | if (fw_loading) { | |
4674 | elapsed += 100; | |
4675 | msleep(100); | |
4676 | continue; | |
4677 | } | |
4678 | return 0; | |
4679 | } | |
4680 | ||
4681 | return -ETIMEDOUT; | |
4682 | } | |
4683 | ||
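/* Callers choose the timeout budget; ice_init_dev() below passes 30000 ms
 * for boards whose firmware comes from an external source. The loop polls
 * GL_MNG_FWSM every 100 ms, so the added latency after readiness is at most
 * about one polling period.
 */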
5b246e53 MS |
4684 | static int ice_init_dev(struct ice_pf *pf) |
4685 | { | |
4686 | struct device *dev = ice_pf_to_dev(pf); | |
4687 | struct ice_hw *hw = &pf->hw; | |
4688 | int err; | |
4689 | ||
4690 | err = ice_init_hw(hw); | |
4691 | if (err) { | |
4692 | dev_err(dev, "ice_init_hw failed: %d\n", err); | |
4693 | return err; | |
4694 | } | |
4695 | ||
5708155d JS |
4696 | /* Some cards require longer initialization times | |
4697 | * due to the necessity of loading FW from an external source. | |
4698 | * This can take up to half a minute. | |
4699 | */ | |
4700 | if (ice_is_pf_c827(hw)) { | |
4701 | err = ice_wait_for_fw(hw, 30000); | |
4702 | if (err) { | |
4703 | dev_err(dev, "ice_wait_for_fw timed out\n"); | |
4704 | return err; | |
4705 | } | |
4706 | } | |
4707 | ||
5b246e53 MS |
4708 | ice_init_feature_support(pf); |
4709 | ||
4710 | ice_request_fw(pf); | |
462acf6a TN |
4711 | |
4712 | /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be | |
4713 | * set in pf->flags, which will cause ice_is_safe_mode to return | |
4714 | * true | |
4715 | */ | |
4716 | if (ice_is_safe_mode(pf)) { | |
462acf6a TN |
4717 | /* we already got function/device capabilities but these don't |
4718 | * reflect what the driver needs to do in safe mode. Instead of | |
4719 | * adding conditional logic everywhere to ignore these | |
4720 | * device/function capabilities, override them. | |
4721 | */ | |
4722 | ice_set_safe_mode_caps(hw); | |
4723 | } | |
4724 | ||
78b5713a AV |
4725 | err = ice_init_pf(pf); |
4726 | if (err) { | |
4727 | dev_err(dev, "ice_init_pf failed: %d\n", err); | |
5b246e53 | 4728 | goto err_init_pf; |
78b5713a | 4729 | } |
940b61af | 4730 | |
b20e6c17 JK |
4731 | pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; |
4732 | pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; | |
4733 | pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; | |
4734 | pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; | |
b20e6c17 | 4735 | if (pf->hw.tnl.valid_count[TNL_VXLAN]) { |
5b246e53 | 4736 | pf->hw.udp_tunnel_nic.tables[0].n_entries = |
b20e6c17 | 4737 | pf->hw.tnl.valid_count[TNL_VXLAN]; |
5b246e53 | 4738 | pf->hw.udp_tunnel_nic.tables[0].tunnel_types = |
b20e6c17 | 4739 | UDP_TUNNEL_TYPE_VXLAN; |
b20e6c17 JK |
4740 | } |
4741 | if (pf->hw.tnl.valid_count[TNL_GENEVE]) { | |
5b246e53 | 4742 | pf->hw.udp_tunnel_nic.tables[1].n_entries = |
b20e6c17 | 4743 | pf->hw.tnl.valid_count[TNL_GENEVE]; |
5b246e53 | 4744 | pf->hw.udp_tunnel_nic.tables[1].tunnel_types = |
b20e6c17 | 4745 | UDP_TUNNEL_TYPE_GENEVE; |
288ecf49 BM |
4746 | } |
4747 | ||
940b61af AV |
4748 | err = ice_init_interrupt_scheme(pf); |
4749 | if (err) { | |
77ed84f4 | 4750 | dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); |
940b61af | 4751 | err = -EIO; |
5b246e53 | 4752 | goto err_init_interrupt_scheme; |
940b61af AV |
4753 | } |
4754 | ||
4755 | /* In case of MSIX we are going to setup the misc vector right here | |
4756 | * to handle admin queue events etc. In case of legacy and MSI | |
4757 | * the misc functionality and queue processing is combined in | |
4758 | * the same vector and that gets setup at open. | |
4759 | */ | |
ba880734 BC |
4760 | err = ice_req_irq_msix_misc(pf); |
4761 | if (err) { | |
4762 | dev_err(dev, "setup of misc vector failed: %d\n", err); | |
5b246e53 | 4763 | goto err_req_irq_msix_misc; |
940b61af AV |
4764 | } |
4765 | ||
5b246e53 | 4766 | return 0; |
940b61af | 4767 | |
5b246e53 MS |
4768 | err_req_irq_msix_misc: |
4769 | ice_clear_interrupt_scheme(pf); | |
4770 | err_init_interrupt_scheme: | |
4771 | ice_deinit_pf(pf); | |
4772 | err_init_pf: | |
4773 | ice_deinit_hw(hw); | |
4774 | return err; | |
4775 | } | |
b1edc14a | 4776 | |
5b246e53 MS |
4777 | static void ice_deinit_dev(struct ice_pf *pf) |
4778 | { | |
4779 | ice_free_irq_msix_misc(pf); | |
5b246e53 MS |
4780 | ice_deinit_pf(pf); |
4781 | ice_deinit_hw(&pf->hw); | |
24b454bc JB |
4782 | |
4783 | /* Service task is already stopped, so call reset directly. */ | |
4784 | ice_reset(&pf->hw, ICE_RESET_PFR); | |
4785 | pci_wait_for_pending_transaction(pf->pdev); | |
4786 | ice_clear_interrupt_scheme(pf); | |
5b246e53 | 4787 | } |
940b61af | 4788 | |
5b246e53 MS |
4789 | static void ice_init_features(struct ice_pf *pf) |
4790 | { | |
4791 | struct device *dev = ice_pf_to_dev(pf); | |
940b61af | 4792 | |
5b246e53 MS |
4793 | if (ice_is_safe_mode(pf)) |
4794 | return; | |
9daf8208 | 4795 | |
5b246e53 MS |
4796 | /* initialize DDP driven features */ |
4797 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) | |
4798 | ice_ptp_init(pf); | |
9daf8208 | 4799 | |
5b246e53 MS |
4800 | if (ice_is_feature_supported(pf, ICE_F_GNSS)) |
4801 | ice_gnss_init(pf); | |
4802 | ||
d7999f5e AK |
4803 | if (ice_is_feature_supported(pf, ICE_F_CGU) || |
4804 | ice_is_feature_supported(pf, ICE_F_PHY_RCLK)) | |
4805 | ice_dpll_init(pf); | |
4806 | ||
5b246e53 MS |
4807 | /* Note: Flow director init failure is non-fatal to load */ |
4808 | if (ice_init_fdir(pf)) | |
4809 | dev_err(dev, "could not initialize flow director\n"); | |
4810 | ||
4811 | /* Note: DCB init failure is non-fatal to load */ | |
4812 | if (ice_init_pf_dcb(pf, false)) { | |
4813 | clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); | |
4814 | clear_bit(ICE_FLAG_DCB_ENA, pf->flags); | |
4815 | } else { | |
4816 | ice_cfg_lldp_mib_change(&pf->hw, true); | |
e3710a01 PSJ |
4817 | } |
4818 | ||
5b246e53 MS |
4819 | if (ice_init_lag(pf)) |
4820 | dev_warn(dev, "Failed to init link aggregation support\n"); | |
4da71a77 KK |
4821 | |
4822 | ice_hwmon_init(pf); | |
5b246e53 MS |
4823 | } |
4824 | ||
4825 | static void ice_deinit_features(struct ice_pf *pf) | |
4826 | { | |
42066c4d MP |
4827 | if (ice_is_safe_mode(pf)) |
4828 | return; | |
4829 | ||
5b246e53 MS |
4830 | ice_deinit_lag(pf); |
4831 | if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) | |
4832 | ice_cfg_lldp_mib_change(&pf->hw, false); | |
4833 | ice_deinit_fdir(pf); | |
4834 | if (ice_is_feature_supported(pf, ICE_F_GNSS)) | |
4835 | ice_gnss_exit(pf); | |
4836 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) | |
4837 | ice_ptp_release(pf); | |
d7999f5e AK |
4838 | if (test_bit(ICE_FLAG_DPLL, pf->flags)) |
4839 | ice_dpll_deinit(pf); | |
af41b185 MS |
4840 | if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) |
4841 | xa_destroy(&pf->eswitch.reprs); | |
5b246e53 MS |
4842 | } |
4843 | ||
4844 | static void ice_init_wakeup(struct ice_pf *pf) | |
4845 | { | |
4846 | /* Save wakeup reason register for later use */ | |
4847 | pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); | |
4848 | ||
4849 | /* check for a power management event */ | |
4850 | ice_print_wake_reason(pf); | |
4851 | ||
4852 | /* clear wake status, all bits */ | |
4853 | wr32(&pf->hw, PFPM_WUS, U32_MAX); | |
4854 | ||
4855 | /* Disable WoL at init, wait for user to enable */ | |
4856 | device_set_wakeup_enable(ice_pf_to_dev(pf), false); | |
4857 | } | |
4858 | ||
4859 | static int ice_init_link(struct ice_pf *pf) | |
4860 | { | |
4861 | struct device *dev = ice_pf_to_dev(pf); | |
4862 | int err; | |
9daf8208 | 4863 | |
250c3b3e BC |
4864 | err = ice_init_link_events(pf->hw.port_info); |
4865 | if (err) { | |
4866 | dev_err(dev, "ice_init_link_events failed: %d\n", err); | |
5b246e53 | 4867 | return err; |
250c3b3e BC |
4868 | } |
4869 | ||
08771bce | 4870 | /* not a fatal error if this fails */ |
1a3571b5 | 4871 | err = ice_init_nvm_phy_type(pf->hw.port_info); |
08771bce | 4872 | if (err) |
1a3571b5 | 4873 | dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); |
1a3571b5 | 4874 | |
08771bce | 4875 | /* not a fatal error if this fails */ |
1a3571b5 | 4876 | err = ice_update_link_info(pf->hw.port_info); |
08771bce | 4877 | if (err) |
1a3571b5 | 4878 | dev_err(dev, "ice_update_link_info failed: %d\n", err); |
1a3571b5 | 4879 | |
ea78ce4d PG |
4880 | ice_init_link_dflt_override(pf->hw.port_info); |
4881 | ||
99d40752 BC |
4882 | ice_check_link_cfg_err(pf, |
4883 | pf->hw.port_info->phy.link_info.link_cfg_err); | |
c77849f5 | 4884 | |
1a3571b5 PG |
4885 | /* if media available, initialize PHY settings */ |
4886 | if (pf->hw.port_info->phy.link_info.link_info & | |
4887 | ICE_AQ_MEDIA_AVAILABLE) { | |
08771bce | 4888 | /* not a fatal error if this fails */ |
1a3571b5 | 4889 | err = ice_init_phy_user_cfg(pf->hw.port_info); |
08771bce | 4890 | if (err) |
1a3571b5 | 4891 | dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); |
1a3571b5 PG |
4892 | |
4893 | if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { | |
4894 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
4895 | ||
4896 | if (vsi) | |
4897 | ice_configure_phy(vsi); | |
4898 | } | |
4899 | } else { | |
4900 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); | |
4901 | } | |
4902 | ||
5b246e53 MS |
4903 | return err; |
4904 | } | |
c585ea42 | 4905 | |
5b246e53 MS |
4906 | static int ice_init_pf_sw(struct ice_pf *pf) |
4907 | { | |
4908 | bool dvm = ice_is_dvm_ena(&pf->hw); | |
4909 | struct ice_vsi *vsi; | |
4910 | int err; | |
769c500d | 4911 | |
5b246e53 MS |
4912 | /* create switch struct for the switch element created by FW on boot */ |
4913 | pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); | |
4914 | if (!pf->first_sw) | |
4915 | return -ENOMEM; | |
769c500d | 4916 | |
5b246e53 MS |
4917 | if (pf->hw.evb_veb) |
4918 | pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; | |
4919 | else | |
4920 | pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; | |
769c500d | 4921 | |
5b246e53 | 4922 | pf->first_sw->pf = pf; |
769c500d | 4923 | |
5b246e53 MS |
4924 | /* record the sw_id available for later use */ |
4925 | pf->first_sw->sw_id = pf->hw.port_info->sw_id; | |
4926 | ||
4927 | err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); | |
4928 | if (err) | |
4929 | goto err_aq_set_port_params; | |
4930 | ||
4931 | vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); | |
4932 | if (!vsi) { | |
4933 | err = -ENOMEM; | |
4934 | goto err_pf_vsi_setup; | |
cd1f56f4 | 4935 | } |
462acf6a | 4936 | |
5b246e53 | 4937 | return 0; |
462acf6a | 4938 | |
5b246e53 MS |
4939 | err_pf_vsi_setup: |
4940 | err_aq_set_port_params: | |
4941 | kfree(pf->first_sw); | |
4942 | return err; | |
4943 | } | |
43113ff7 | 4944 | |
5b246e53 MS |
4945 | static void ice_deinit_pf_sw(struct ice_pf *pf) |
4946 | { | |
4947 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
148beb61 | 4948 | |
5b246e53 MS |
4949 | if (!vsi) |
4950 | return; | |
4951 | ||
4952 | ice_vsi_release(vsi); | |
4953 | kfree(pf->first_sw); | |
4954 | } | |
4955 | ||
4956 | static int ice_alloc_vsis(struct ice_pf *pf) | |
4957 | { | |
4958 | struct device *dev = ice_pf_to_dev(pf); | |
4959 | ||
4960 | pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; | |
4961 | if (!pf->num_alloc_vsi) | |
4962 | return -EIO; | |
4963 | ||
4964 | if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { | |
4965 | dev_warn(dev, | |
4966 | "limiting the VSI count due to UDP tunnel limitation %d > %d\n", | |
4967 | pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); | |
4968 | pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; | |
462acf6a TN |
4969 | } |
4970 | ||
5b246e53 MS |
4971 | pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), |
4972 | GFP_KERNEL); | |
4973 | if (!pf->vsi) | |
4974 | return -ENOMEM; | |
df006dd4 | 4975 | |
5b246e53 MS |
4976 | pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, |
4977 | sizeof(*pf->vsi_stats), GFP_KERNEL); | |
4978 | if (!pf->vsi_stats) { | |
4979 | devm_kfree(dev, pf->vsi); | |
4980 | return -ENOMEM; | |
4981 | } | |
e18ff118 | 4982 | |
5b246e53 MS |
4983 | return 0; |
4984 | } | |
4985 | ||
4986 | static void ice_dealloc_vsis(struct ice_pf *pf) | |
4987 | { | |
4988 | devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); | |
4989 | pf->vsi_stats = NULL; | |
4990 | ||
4991 | pf->num_alloc_vsi = 0; | |
4992 | devm_kfree(ice_pf_to_dev(pf), pf->vsi); | |
4993 | pf->vsi = NULL; | |
4994 | } | |
4995 | ||
4996 | static int ice_init_devlink(struct ice_pf *pf) | |
4997 | { | |
4998 | int err; | |
4999 | ||
5000 | err = ice_devlink_register_params(pf); | |
418e5340 | 5001 | if (err) |
5b246e53 | 5002 | return err; |
418e5340 | 5003 | |
5b246e53 MS |
5004 | ice_devlink_init_regions(pf); |
5005 | ice_devlink_register(pf); | |
418e5340 | 5006 | |
5b246e53 MS |
5007 | return 0; |
5008 | } | |
5009 | ||
5010 | static void ice_deinit_devlink(struct ice_pf *pf) | |
5011 | { | |
5012 | ice_devlink_unregister(pf); | |
5013 | ice_devlink_destroy_regions(pf); | |
5014 | ice_devlink_unregister_params(pf); | |
5015 | } | |
418e5340 | 5016 | |
5b246e53 MS |
5017 | static int ice_init(struct ice_pf *pf) |
5018 | { | |
5019 | int err; | |
5020 | ||
5021 | err = ice_init_dev(pf); | |
1e23f076 | 5022 | if (err) |
5b246e53 | 5023 | return err; |
1e23f076 | 5024 | |
5b246e53 MS |
5025 | err = ice_alloc_vsis(pf); |
5026 | if (err) | |
5027 | goto err_alloc_vsis; | |
5028 | ||
5029 | err = ice_init_pf_sw(pf); | |
5030 | if (err) | |
5031 | goto err_init_pf_sw; | |
5032 | ||
5033 | ice_init_wakeup(pf); | |
5034 | ||
5035 | err = ice_init_link(pf); | |
e523af4e | 5036 | if (err) |
5b246e53 MS |
5037 | goto err_init_link; |
5038 | ||
5039 | err = ice_send_version(pf); | |
5040 | if (err) | |
5041 | goto err_init_link; | |
5042 | ||
5043 | ice_verify_cacheline_size(pf); | |
5044 | ||
5045 | if (ice_is_safe_mode(pf)) | |
5046 | ice_set_safe_mode_vlan_cfg(pf); | |
5047 | else | |
5048 | /* print PCI link speed and width */ | |
5049 | pcie_print_link_status(pf->pdev); | |
e523af4e | 5050 | |
de75135b | 5051 | /* ready to go, so clear down state bit */ |
7e408e07 | 5052 | clear_bit(ICE_DOWN, pf->state); |
5b246e53 MS |
5053 | clear_bit(ICE_SERVICE_DIS, pf->state); |
5054 | ||
5055 | /* since everything is good, start the service timer */ | |
5056 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | |
5057 | ||
5058 | return 0; | |
5059 | ||
5060 | err_init_link: | |
5061 | ice_deinit_pf_sw(pf); | |
5062 | err_init_pf_sw: | |
5063 | ice_dealloc_vsis(pf); | |
5064 | err_alloc_vsis: | |
5065 | ice_deinit_dev(pf); | |
5066 | return err; | |
5067 | } | |
5068 | ||
5069 | static void ice_deinit(struct ice_pf *pf) | |
5070 | { | |
5071 | set_bit(ICE_SERVICE_DIS, pf->state); | |
5072 | set_bit(ICE_DOWN, pf->state); | |
5073 | ||
5074 | ice_deinit_pf_sw(pf); | |
5075 | ice_dealloc_vsis(pf); | |
5076 | ice_deinit_dev(pf); | |
5077 | } | |
5078 | ||
5079 | /** | |
5080 | * ice_load - load the PF by initializing HW and starting the main VSI | |
5081 | * @pf: pointer to the pf instance | |
5082 | */ | |
5083 | int ice_load(struct ice_pf *pf) | |
5084 | { | |
5e509ab2 | 5085 | struct ice_vsi_cfg_params params = {}; |
5b246e53 MS |
5086 | struct ice_vsi *vsi; |
5087 | int err; | |
5088 | ||
5b246e53 MS |
5089 | err = ice_init_dev(pf); |
5090 | if (err) | |
5091 | return err; | |
5092 | ||
5093 | vsi = ice_get_main_vsi(pf); | |
5e509ab2 JK |
5094 | |
5095 | params = ice_vsi_to_params(vsi); | |
5096 | params.flags = ICE_VSI_FLAG_INIT; | |
5097 | ||
b3e7b3a6 | 5098 | rtnl_lock(); |
5e509ab2 | 5099 | err = ice_vsi_cfg(vsi, ¶ms); |
5b246e53 MS |
5100 | if (err) |
5101 | goto err_vsi_cfg; | |
5102 | ||
5103 | err = ice_start_eth(ice_get_main_vsi(pf)); | |
5104 | if (err) | |
5105 | goto err_start_eth; | |
b3e7b3a6 | 5106 | rtnl_unlock(); |
5b246e53 | 5107 | |
2b8db6af | 5108 | err = ice_init_rdma(pf); |
5b246e53 MS |
5109 | if (err) |
5110 | goto err_init_rdma; | |
5111 | ||
5112 | ice_init_features(pf); | |
5113 | ice_service_task_restart(pf); | |
5114 | ||
5115 | clear_bit(ICE_DOWN, pf->state); | |
5116 | ||
5117 | return 0; | |
5118 | ||
5119 | err_init_rdma: | |
5120 | ice_vsi_close(ice_get_main_vsi(pf)); | |
b3e7b3a6 | 5121 | rtnl_lock(); |
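	/* the success path dropped the RTNL above; re-acquire it so the
	 * labels below run under the same lock the ice_vsi_cfg() and
	 * ice_start_eth() failure paths still hold, and err_vsi_cfg
	 * unlocks exactly once
	 */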
5b246e53 MS |
5122 | err_start_eth: |
5123 | ice_vsi_decfg(ice_get_main_vsi(pf)); | |
5124 | err_vsi_cfg: | |
b3e7b3a6 | 5125 | rtnl_unlock(); |
5b246e53 MS |
5126 | ice_deinit_dev(pf); |
5127 | return err; | |
5128 | } | |
5129 | ||
5130 | /** | |
5131 | * ice_unload - unload the PF by stopping the VSI and deinitializing HW | |
5132 | * @pf: pointer to the pf instance | |
5133 | */ | |
5134 | void ice_unload(struct ice_pf *pf) | |
5135 | { | |
5136 | ice_deinit_features(pf); | |
5137 | ice_deinit_rdma(pf); | |
b3e7b3a6 | 5138 | rtnl_lock(); |
7d46c0e6 | 5139 | ice_stop_eth(ice_get_main_vsi(pf)); |
5b246e53 | 5140 | ice_vsi_decfg(ice_get_main_vsi(pf)); |
b3e7b3a6 | 5141 | rtnl_unlock(); |
5b246e53 MS |
5142 | ice_deinit_dev(pf); |
5143 | } | |
5144 | ||
5145 | /** | |
5146 | * ice_probe - Device initialization routine | |
5147 | * @pdev: PCI device information struct | |
5148 | * @ent: entry in ice_pci_tbl | |
5149 | * | |
5150 | * Returns 0 on success, negative on failure | |
5151 | */ | |
5152 | static int | |
5153 | ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) | |
5154 | { | |
5155 | struct device *dev = &pdev->dev; | |
5156 | struct ice_pf *pf; | |
5157 | struct ice_hw *hw; | |
5158 | int err; | |
5159 | ||
5160 | if (pdev->is_virtfn) { | |
5161 | dev_err(dev, "can't probe a virtual function\n"); | |
5162 | return -EINVAL; | |
5163 | } | |
5164 | ||
0288c3e7 JB |
5165 | /* When under a kdump kernel, initiate a reset before enabling the | |
5166 | * device in order to clear out any pending DMA transactions. These | |
5167 | * transactions can cause some systems to machine check when doing | |
5168 | * the pcim_enable_device() below. | |
5169 | */ | |
5170 | if (is_kdump_kernel()) { | |
5171 | pci_save_state(pdev); | |
5172 | pci_clear_master(pdev); | |
5173 | err = pcie_flr(pdev); | |
5174 | if (err) | |
5175 | return err; | |
5176 | pci_restore_state(pdev); | |
5177 | } | |
5178 | ||
5b246e53 MS |
5179 | /* this driver uses devres, see |
5180 | * Documentation/driver-api/driver-model/devres.rst | |
5181 | */ | |
5182 | err = pcim_enable_device(pdev); | |
5183 | if (err) | |
5184 | return err; | |
5185 | ||
5186 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); | |
2b8db6af | 5187 | if (err) { |
5b246e53 MS |
5188 | dev_err(dev, "BAR0 I/O map error %d\n", err); |
5189 | return err; | |
d25a0fc4 DE |
5190 | } |
5191 | ||
5b246e53 MS |
5192 | pf = ice_allocate_pf(dev); |
5193 | if (!pf) | |
5194 | return -ENOMEM; | |
f31e4b6f | 5195 | |
5b246e53 MS |
5196 | /* initialize Auxiliary index to invalid value */ |
5197 | pf->aux_idx = -1; | |
5198 | ||
5199 | /* set up for high or low DMA */ | |
5200 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); | |
5201 | if (err) { | |
5202 | dev_err(dev, "DMA configuration failed: 0x%x\n", err); | |
5203 | return err; | |
5204 | } | |
5205 | ||
5206 | pci_set_master(pdev); | |
5207 | ||
5208 | pf->pdev = pdev; | |
5209 | pci_set_drvdata(pdev, pf); | |
7e408e07 | 5210 | set_bit(ICE_DOWN, pf->state); |
5b246e53 MS |
5211 | /* Disable service task until DOWN bit is cleared */ |
5212 | set_bit(ICE_SERVICE_DIS, pf->state); | |
5213 | ||
5214 | hw = &pf->hw; | |
5215 | hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; | |
5216 | pci_save_state(pdev); | |
5217 | ||
5218 | hw->back = pf; | |
5219 | hw->port_info = NULL; | |
5220 | hw->vendor_id = pdev->vendor; | |
5221 | hw->device_id = pdev->device; | |
5222 | pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); | |
5223 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
5224 | hw->subsystem_device_id = pdev->subsystem_device; | |
5225 | hw->bus.device = PCI_SLOT(pdev->devfn); | |
5226 | hw->bus.func = PCI_FUNC(pdev->devfn); | |
5227 | ice_set_ctrlq_len(hw); | |
5228 | ||
5229 | pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); | |
5230 | ||
5231 | #ifndef CONFIG_DYNAMIC_DEBUG | |
5232 | if (debug < -1) | |
5233 | hw->debug_mask = debug; | |
5234 | #endif | |
5235 | ||
5236 | err = ice_init(pf); | |
5237 | if (err) | |
5238 | goto err_init; | |
5239 | ||
5240 | err = ice_init_eth(pf); | |
5241 | if (err) | |
5242 | goto err_init_eth; | |
5243 | ||
5244 | err = ice_init_rdma(pf); | |
5245 | if (err) | |
5246 | goto err_init_rdma; | |
5247 | ||
5248 | err = ice_init_devlink(pf); | |
5249 | if (err) | |
5250 | goto err_init_devlink; | |
5251 | ||
5252 | ice_init_features(pf); | |
5253 | ||
5254 | return 0; | |
5255 | ||
5256 | err_init_devlink: | |
5257 | ice_deinit_rdma(pf); | |
5258 | err_init_rdma: | |
5259 | ice_deinit_eth(pf); | |
5260 | err_init_eth: | |
5261 | ice_deinit(pf); | |
5262 | err_init: | |
769c500d | 5263 | pci_disable_device(pdev); |
f31e4b6f | 5264 | return err; |
837f08fd AV |
5265 | } |
5266 | ||
769c500d AA |
5267 | /** |
5268 | * ice_set_wake - enable or disable Wake on LAN | |
5269 | * @pf: pointer to the PF struct | |
5270 | * | |
5271 | * Simple helper for WoL control | |
5272 | */ | |
5273 | static void ice_set_wake(struct ice_pf *pf) | |
5274 | { | |
5275 | struct ice_hw *hw = &pf->hw; | |
5276 | bool wol = pf->wol_ena; | |
5277 | ||
5278 | /* clear wake state, otherwise new wake events won't fire */ | |
5279 | wr32(hw, PFPM_WUS, U32_MAX); | |
5280 | ||
5281 | /* enable / disable APM wake up, no RMW needed */ | |
5282 | wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); | |
5283 | ||
5284 | /* set magic packet filter enabled */ | |
5285 | wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); | |
5286 | } | |
5287 | ||
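/* pf->wol_ena is the single input here (presumably toggled through the
 * ethtool WoL path); ice_remove(), ice_shutdown() and ice_suspend() all call
 * this helper after ice_setup_mc_magic_wake() so APM and magic-packet wake
 * match the user's last setting.
 */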
5288 | /** | |
ef860480 | 5289 | * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet |
769c500d AA |
5290 | * @pf: pointer to the PF struct |
5291 | * | |
5292 | * Issue firmware command to enable multicast magic wake, making | |
5293 | * sure that any locally administered address (LAA) is used for | |
5294 | * wake, and that PF reset doesn't undo the LAA. | |
5295 | */ | |
5296 | static void ice_setup_mc_magic_wake(struct ice_pf *pf) | |
5297 | { | |
5298 | struct device *dev = ice_pf_to_dev(pf); | |
5299 | struct ice_hw *hw = &pf->hw; | |
769c500d AA |
5300 | u8 mac_addr[ETH_ALEN]; |
5301 | struct ice_vsi *vsi; | |
5518ac2a | 5302 | int status; |
769c500d AA |
5303 | u8 flags; |
5304 | ||
5305 | if (!pf->wol_ena) | |
5306 | return; | |
5307 | ||
5308 | vsi = ice_get_main_vsi(pf); | |
5309 | if (!vsi) | |
5310 | return; | |
5311 | ||
5312 | /* Get current MAC address in case it's an LAA */ | |
5313 | if (vsi->netdev) | |
5314 | ether_addr_copy(mac_addr, vsi->netdev->dev_addr); | |
5315 | else | |
5316 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); | |
5317 | ||
5318 | flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | | |
5319 | ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | | |
5320 | ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; | |
5321 | ||
5322 | status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); | |
5323 | if (status) | |
5f87ec48 | 5324 | dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n", |
5518ac2a | 5325 | status, ice_aq_str(hw->adminq.sq_last_status)); |
769c500d AA |
5326 | } |
5327 | ||
837f08fd AV |
5328 | /** |
5329 | * ice_remove - Device removal routine | |
5330 | * @pdev: PCI device information struct | |
5331 | */ | |
5332 | static void ice_remove(struct pci_dev *pdev) | |
5333 | { | |
5334 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
81b23589 | 5335 | int i; |
837f08fd | 5336 | |
afd9d4ab AV |
5337 | for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { |
5338 | if (!ice_is_reset_in_progress(pf->state)) | |
5339 | break; | |
5340 | msleep(100); | |
5341 | } | |
5342 | ||
96a9a934 PSJ |
5343 | ice_debugfs_exit(); |
5344 | ||
f844d521 | 5345 | if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { |
7e408e07 | 5346 | set_bit(ICE_VF_RESETS_DISABLED, pf->state); |
f844d521 BC |
5347 | ice_free_vfs(pf); |
5348 | } | |
5349 | ||
4da71a77 KK |
5350 | ice_hwmon_exit(pf); |
5351 | ||
8d81fa55 | 5352 | ice_service_task_stop(pf); |
d69ea414 | 5353 | ice_aq_cancel_waiting_tasks(pf); |
f9f5301e | 5354 | set_bit(ICE_DOWN, pf->state); |
d69ea414 | 5355 | |
28bf2672 BC |
5356 | if (!ice_is_safe_mode(pf)) |
5357 | ice_remove_arfs(pf); | |
5b246e53 MS |
5358 | ice_deinit_features(pf); |
5359 | ice_deinit_devlink(pf); | |
5360 | ice_deinit_rdma(pf); | |
5361 | ice_deinit_eth(pf); | |
5362 | ice_deinit(pf); | |
5363 | ||
0f9d5027 | 5364 | ice_vsi_release_all(pf); |
5b246e53 MS |
5365 | |
5366 | ice_setup_mc_magic_wake(pf); | |
769c500d | 5367 | ice_set_wake(pf); |
1adf7ead | 5368 | |
769c500d AA |
5369 | pci_disable_device(pdev); |
5370 | } | |
5371 | ||
5372 | /** | |
5373 | * ice_shutdown - PCI callback for shutting down device | |
5374 | * @pdev: PCI device information struct | |
5375 | */ | |
5376 | static void ice_shutdown(struct pci_dev *pdev) | |
5377 | { | |
5378 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
5379 | ||
5380 | ice_remove(pdev); | |
5381 | ||
5382 | if (system_state == SYSTEM_POWER_OFF) { | |
5383 | pci_wake_from_d3(pdev, pf->wol_ena); | |
5384 | pci_set_power_state(pdev, PCI_D3hot); | |
5385 | } | |
837f08fd AV |
5386 | } |
5387 | ||
769c500d AA |
5388 | #ifdef CONFIG_PM |
5389 | /** | |
5390 | * ice_prepare_for_shutdown - prep for PCI shutdown | |
5391 | * @pf: board private structure | |
5392 | * | |
5393 | * Inform or close all dependent features in prep for PCI device shutdown | |
5394 | */ | |
5395 | static void ice_prepare_for_shutdown(struct ice_pf *pf) | |
5396 | { | |
5397 | struct ice_hw *hw = &pf->hw; | |
5398 | u32 v; | |
5399 | ||
5400 | /* Notify VFs of impending reset */ | |
5401 | if (ice_check_sq_alive(hw, &hw->mailboxq)) | |
5402 | ice_vc_notify_reset(pf); | |
5403 | ||
5404 | dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); | |
5405 | ||
5406 | /* disable the VSIs and their queues that are not already DOWN */ | |
5407 | ice_pf_dis_all_vsi(pf, false); | |
5408 | ||
5409 | ice_for_each_vsi(pf, v) | |
5410 | if (pf->vsi[v]) | |
5411 | pf->vsi[v]->vsi_num = 0; | |
5412 | ||
5413 | ice_shutdown_all_ctrlq(hw); | |
5414 | } | |
5415 | ||
5416 | /** | |
5417 | * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme | |
5418 | * @pf: board private structure to reinitialize | |
5419 | * | |
5420 | * This routine reinitializes the interrupt scheme that was cleared during | |
5421 | * the power management suspend callback. | |
5422 | * | |
5423 | * This should be called during the resume routine to re-allocate the q_vectors | |
5424 | * and reacquire interrupts. | |
5425 | */ | |
5426 | static int ice_reinit_interrupt_scheme(struct ice_pf *pf) | |
5427 | { | |
5428 | struct device *dev = ice_pf_to_dev(pf); | |
5429 | int ret, v; | |
5430 | ||
5431 | /* Since we clear the MSIX flag during suspend, we need to | |
5432 | * set it back during resume. | |
5433 | */ | |
5434 | ||
5435 | ret = ice_init_interrupt_scheme(pf); | |
5436 | if (ret) { | |
5437 | dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); | |
5438 | return ret; | |
5439 | } | |
5440 | ||
5441 | /* Remap vectors and rings after successfully re-initializing interrupts */ | |
5442 | ice_for_each_vsi(pf, v) { | |
5443 | if (!pf->vsi[v]) | |
5444 | continue; | |
5445 | ||
5446 | ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); | |
5447 | if (ret) | |
5448 | goto err_reinit; | |
5449 | ice_vsi_map_rings_to_vectors(pf->vsi[v]); | |
080b0c8d | 5450 | ice_vsi_set_napi_queues(pf->vsi[v]); |
769c500d AA |
5451 | } |
5452 | ||
5453 | ret = ice_req_irq_msix_misc(pf); | |
5454 | if (ret) { | |
5455 | dev_err(dev, "Setting up misc vector failed after device suspend %d\n", | |
5456 | ret); | |
5457 | goto err_reinit; | |
5458 | } | |
5459 | ||
5460 | return 0; | |
5461 | ||
5462 | err_reinit: | |
5463 | while (v--) | |
5464 | if (pf->vsi[v]) | |
5465 | ice_vsi_free_q_vectors(pf->vsi[v]); | |
5466 | ||
5467 | return ret; | |
5468 | } | |
5469 | ||
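/* The "while (v--)" unwind above is the usual partial-failure idiom: v holds
 * the index of the VSI whose q_vector allocation failed (or the VSI count if
 * the misc vector request failed), so decrementing first frees only what was
 * actually allocated.
 */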
5470 | /** | |
5471 | * ice_suspend | |
5472 | * @dev: generic device information structure | |
5473 | * | |
5474 | * Power Management callback to quiesce the device and prepare | |
5475 | * for D3 transition. | |
5476 | */ | |
65c72291 | 5477 | static int __maybe_unused ice_suspend(struct device *dev) |
769c500d AA |
5478 | { |
5479 | struct pci_dev *pdev = to_pci_dev(dev); | |
5480 | struct ice_pf *pf; | |
5481 | int disabled, v; | |
5482 | ||
5483 | pf = pci_get_drvdata(pdev); | |
5484 | ||
5485 | if (!ice_pf_state_is_nominal(pf)) { | |
5486 | dev_err(dev, "Device is not ready, no need to suspend it\n"); | |
5487 | return -EBUSY; | |
5488 | } | |
5489 | ||
5490 | /* Stop watchdog tasks until resume completion. | |
5491 | * Even though it is most likely that the service task is | |
5492 | * disabled if the device is suspended or down, the service task's | |
5493 | * state is controlled by a different state bit, and we should | |
5494 | * store and honor whatever state that bit is in at this point. | |
5495 | */ | |
5496 | disabled = ice_service_task_stop(pf); | |
5497 | ||
f9f5301e DE |
5498 | ice_unplug_aux_dev(pf); |
5499 | ||
769c500d | 5500 | /* Already suspended? Then there is nothing to do */ | |
7e408e07 | 5501 | if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { |
769c500d AA |
5502 | if (!disabled) |
5503 | ice_service_task_restart(pf); | |
5504 | return 0; | |
5505 | } | |
5506 | ||
7e408e07 | 5507 | if (test_bit(ICE_DOWN, pf->state) || |
769c500d AA |
5508 | ice_is_reset_in_progress(pf->state)) { |
5509 | dev_err(dev, "can't suspend device in reset or already down\n"); | |
5510 | if (!disabled) | |
5511 | ice_service_task_restart(pf); | |
5512 | return 0; | |
5513 | } | |
5514 | ||
5515 | ice_setup_mc_magic_wake(pf); | |
5516 | ||
5517 | ice_prepare_for_shutdown(pf); | |
5518 | ||
5519 | ice_set_wake(pf); | |
5520 | ||
5521 | /* Free vectors, clear the interrupt scheme and release IRQs | |
5522 | * for proper hibernation, especially with large number of CPUs. | |
5523 | * Otherwise hibernation might fail when mapping all the vectors back | |
5524 | * to CPU0. | |
5525 | */ | |
5526 | ice_free_irq_msix_misc(pf); | |
5527 | ice_for_each_vsi(pf, v) { | |
5528 | if (!pf->vsi[v]) | |
5529 | continue; | |
5530 | ice_vsi_free_q_vectors(pf->vsi[v]); | |
5531 | } | |
5532 | ice_clear_interrupt_scheme(pf); | |
5533 | ||
466e4392 | 5534 | pci_save_state(pdev); |
769c500d AA |
5535 | pci_wake_from_d3(pdev, pf->wol_ena); |
5536 | pci_set_power_state(pdev, PCI_D3hot); | |
5537 | return 0; | |
5538 | } | |
5539 | ||
5540 | /** | |
5541 | * ice_resume - PM callback for waking up from D3 | |
5542 | * @dev: generic device information structure | |
5543 | */ | |
65c72291 | 5544 | static int __maybe_unused ice_resume(struct device *dev) |
769c500d AA |
5545 | { |
5546 | struct pci_dev *pdev = to_pci_dev(dev); | |
5547 | enum ice_reset_req reset_type; | |
5548 | struct ice_pf *pf; | |
5549 | struct ice_hw *hw; | |
5550 | int ret; | |
5551 | ||
5552 | pci_set_power_state(pdev, PCI_D0); | |
5553 | pci_restore_state(pdev); | |
5554 | pci_save_state(pdev); | |
5555 | ||
5556 | if (!pci_device_is_present(pdev)) | |
5557 | return -ENODEV; | |
5558 | ||
5559 | ret = pci_enable_device_mem(pdev); | |
5560 | if (ret) { | |
5561 | dev_err(dev, "Cannot enable device after suspend\n"); | |
5562 | return ret; | |
5563 | } | |
5564 | ||
5565 | pf = pci_get_drvdata(pdev); | |
5566 | hw = &pf->hw; | |
5567 | ||
5568 | pf->wakeup_reason = rd32(hw, PFPM_WUS); | |
5569 | ice_print_wake_reason(pf); | |
5570 | ||
5571 | /* We cleared the interrupt scheme when we suspended, so we need to | |
5572 | * restore it now to resume device functionality. | |
5573 | */ | |
5574 | ret = ice_reinit_interrupt_scheme(pf); | |
5575 | if (ret) | |
5576 | dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); | |
5577 | ||
7e408e07 | 5578 | clear_bit(ICE_DOWN, pf->state); |
769c500d AA |
5579 | /* Now perform PF reset and rebuild */ |
5580 | reset_type = ICE_RESET_PFR; | |
5581 | /* re-enable service task for reset, but allow reset to schedule it */ | |
7e408e07 | 5582 | clear_bit(ICE_SERVICE_DIS, pf->state); |
769c500d AA |
5583 | |
5584 | if (ice_schedule_reset(pf, reset_type)) | |
5585 | dev_err(dev, "Reset during resume failed.\n"); | |
5586 | ||
7e408e07 | 5587 | clear_bit(ICE_SUSPENDED, pf->state); |
769c500d AA |
5588 | ice_service_task_restart(pf); |
5589 | ||
5590 | /* Restart the service task */ | |
5591 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | |
5592 | ||
5593 | return 0; | |
5594 | } | |
5595 | #endif /* CONFIG_PM */ | |
5596 | ||
5995b6d0 BC |
5597 | /** |
5598 | * ice_pci_err_detected - warning that PCI error has been detected | |
5599 | * @pdev: PCI device information struct | |
5600 | * @err: the type of PCI error | |
5601 | * | |
5602 | * Called to warn that something happened on the PCI bus and the error handling | |
5603 | * is in progress. Allows the driver to gracefully prepare/handle PCI errors. | |
5604 | */ | |
5605 | static pci_ers_result_t | |
16d79cd4 | 5606 | ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) |
5995b6d0 BC |
5607 | { |
5608 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
5609 | ||
5610 | if (!pf) { | |
5611 | dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", | |
5612 | __func__, err); | |
5613 | return PCI_ERS_RESULT_DISCONNECT; | |
5614 | } | |
5615 | ||
7e408e07 | 5616 | if (!test_bit(ICE_SUSPENDED, pf->state)) { |
5995b6d0 BC |
5617 | ice_service_task_stop(pf); |
5618 | ||
7e408e07 AV |
5619 | if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { |
5620 | set_bit(ICE_PFR_REQ, pf->state); | |
fbc7b27a | 5621 | ice_prepare_for_reset(pf, ICE_RESET_PFR); |
5995b6d0 BC |
5622 | } |
5623 | } | |
5624 | ||
5625 | return PCI_ERS_RESULT_NEED_RESET; | |
5626 | } | |
5627 | ||
5628 | /** | |
5629 | * ice_pci_err_slot_reset - a PCI slot reset has just happened | |
5630 | * @pdev: PCI device information struct | |
5631 | * | |
5632 | * Called to determine if the driver can recover from the PCI slot reset by | |
5633 | * using a register read to determine if the device is recoverable. | |
5634 | */ | |
5635 | static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) | |
5636 | { | |
5637 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
5638 | pci_ers_result_t result; | |
5639 | int err; | |
5640 | u32 reg; | |
5641 | ||
5642 | err = pci_enable_device_mem(pdev); | |
5643 | if (err) { | |
19cce2c6 | 5644 | dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", |
5995b6d0 BC |
5645 | err); |
5646 | result = PCI_ERS_RESULT_DISCONNECT; | |
5647 | } else { | |
5648 | pci_set_master(pdev); | |
5649 | pci_restore_state(pdev); | |
5650 | pci_save_state(pdev); | |
5651 | pci_wake_from_d3(pdev, false); | |
5652 | ||
5653 | /* Check for life */ | |
5654 | reg = rd32(&pf->hw, GLGEN_RTRIG); | |
5655 | if (!reg) | |
5656 | result = PCI_ERS_RESULT_RECOVERED; | |
5657 | else | |
5658 | result = PCI_ERS_RESULT_DISCONNECT; | |
5659 | } | |
5660 | ||
5995b6d0 BC |
5661 | return result; |
5662 | } | |
5663 | ||
5664 | /** | |
5665 | * ice_pci_err_resume - restart operations after PCI error recovery | |
5666 | * @pdev: PCI device information struct | |
5667 | * | |
5668 | * Called to allow the driver to bring things back up after PCI error and/or | |
5669 | * reset recovery have finished | |
5670 | */ | |
5671 | static void ice_pci_err_resume(struct pci_dev *pdev) | |
5672 | { | |
5673 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
5674 | ||
5675 | if (!pf) { | |
19cce2c6 AV |
5676 | dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", |
5677 | __func__); | |
5995b6d0 BC |
5678 | return; |
5679 | } | |
5680 | ||
7e408e07 | 5681 | if (test_bit(ICE_SUSPENDED, pf->state)) { |
5995b6d0 BC |
5682 | dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", |
5683 | __func__); | |
5684 | return; | |
5685 | } | |
5686 | ||
31642d28 | 5687 | ice_restore_all_vfs_msi_state(pf); |
a54a0b24 | 5688 | |
5995b6d0 BC |
5689 | ice_do_reset(pf, ICE_RESET_PFR); |
5690 | ice_service_task_restart(pf); | |
5691 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | |
5692 | } | |
5693 | ||
5694 | /** | |
5695 | * ice_pci_err_reset_prepare - prepare device driver for PCI reset | |
5696 | * @pdev: PCI device information struct | |
5697 | */ | |
5698 | static void ice_pci_err_reset_prepare(struct pci_dev *pdev) | |
5699 | { | |
5700 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
5701 | ||
7e408e07 | 5702 | if (!test_bit(ICE_SUSPENDED, pf->state)) { |
5995b6d0 BC |
5703 | ice_service_task_stop(pf); |
5704 | ||
7e408e07 AV |
5705 | if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { |
5706 | set_bit(ICE_PFR_REQ, pf->state); | |
fbc7b27a | 5707 | ice_prepare_for_reset(pf, ICE_RESET_PFR); |
5995b6d0 BC |
5708 | } |
5709 | } | |
5710 | } | |
5711 | ||
5712 | /** | |
5713 | * ice_pci_err_reset_done - PCI reset done, device driver reset can begin | |
5714 | * @pdev: PCI device information struct | |
5715 | */ | |
5716 | static void ice_pci_err_reset_done(struct pci_dev *pdev) | |
5717 | { | |
5718 | ice_pci_err_resume(pdev); | |
5719 | } | |
5720 | ||
837f08fd AV |
5721 | /* ice_pci_tbl - PCI Device ID Table |
5722 | * | |
5723 | * Wildcard entries (PCI_ANY_ID) should come last | |
5724 | * Last entry must be all 0s | |
5725 | * | |
5726 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, | |
5727 | * Class, Class Mask, private data (not used) } | |
5728 | */ | |
5729 | static const struct pci_device_id ice_pci_tbl[] = { | |
f8ab08c0 PC |
5730 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) }, |
5731 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) }, | |
5732 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) }, | |
5733 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) }, | |
5734 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) }, | |
5735 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) }, | |
5736 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) }, | |
5737 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) }, | |
5738 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) }, | |
5739 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) }, | |
5740 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) }, | |
5741 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) }, | |
5742 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) }, | |
5743 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) }, | |
5744 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) }, | |
5745 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) }, | |
5746 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) }, | |
5747 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) }, | |
5748 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) }, | |
5749 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) }, | |
5750 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) }, | |
5751 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) }, | |
5752 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) }, | |
5753 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, | |
5754 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, | |
5755 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, | |
ba20ecb1 PC |
5756 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, |
5757 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, | |
5758 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, | |
5759 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, | |
837f08fd | 5760 | /* required last entry */ |
f8ab08c0 | 5761 | {} |
837f08fd AV |
5762 | }; |
5763 | MODULE_DEVICE_TABLE(pci, ice_pci_tbl); | |
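/* Note on the table above (generic PCI core behavior, not ice-specific):
 * PCI_VDEVICE(INTEL, id) fills in PCI_VENDOR_ID_INTEL and the given device
 * ID while leaving subvendor and subdevice as PCI_ANY_ID, so each entry
 * matches any board built around that device regardless of subsystem IDs.
 */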
5764 | ||
769c500d AA |
5765 | static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); |
5766 | ||
5995b6d0 BC |
5767 | static const struct pci_error_handlers ice_pci_err_handler = { |
5768 | .error_detected = ice_pci_err_detected, | |
5769 | .slot_reset = ice_pci_err_slot_reset, | |
5770 | .reset_prepare = ice_pci_err_reset_prepare, | |
5771 | .reset_done = ice_pci_err_reset_done, | |
5772 | .resume = ice_pci_err_resume | |
5773 | }; | |
5774 | ||
837f08fd AV |
5775 | static struct pci_driver ice_driver = { |
5776 | .name = KBUILD_MODNAME, | |
5777 | .id_table = ice_pci_tbl, | |
5778 | .probe = ice_probe, | |
5779 | .remove = ice_remove, | |
769c500d AA |
5780 | #ifdef CONFIG_PM |
5781 | .driver.pm = &ice_pm_ops, | |
5782 | #endif /* CONFIG_PM */ | |
5783 | .shutdown = ice_shutdown, | |
ddf30f7f | 5784 | .sriov_configure = ice_sriov_configure, |
05c16687 MS |
5785 | .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, |
5786 | .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count, | |
5995b6d0 | 5787 | .err_handler = &ice_pci_err_handler |
837f08fd AV |
5788 | }; |
5789 | ||
5790 | /** | |
5791 | * ice_module_init - Driver registration routine | |
5792 | * | |
5793 | * ice_module_init is the first routine called when the driver is | |
5794 | * loaded. It sets up workqueues and debugfs, then registers with the PCI subsystem. | |
5795 | */ | |
5796 | static int __init ice_module_init(void) | |
5797 | { | |
bb52f42a | 5798 | int status = -ENOMEM; |
837f08fd | 5799 | |
34a2a3b8 | 5800 | pr_info("%s\n", ice_driver_string); |
837f08fd AV |
5801 | pr_info("%s\n", ice_copyright); |
5802 | ||
982b0192 PC |
5803 | ice_adv_lnk_speed_maps_init(); |
5804 | ||
4d159f78 | 5805 | ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); |
940b61af AV |
5806 | if (!ice_wq) { |
5807 | pr_err("Failed to create workqueue\n"); | |
bb52f42a DE |
5808 | return status; |
5809 | } | |
5810 | ||
5811 | ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0); | |
5812 | if (!ice_lag_wq) { | |
5813 | pr_err("Failed to create LAG workqueue\n"); | |
5814 | goto err_dest_wq; | |
940b61af AV |
5815 | } |
5816 | ||
96a9a934 PSJ |
5817 | ice_debugfs_init(); |
5818 | ||
837f08fd | 5819 | status = pci_register_driver(&ice_driver); |
940b61af | 5820 | if (status) { |
2f2da36e | 5821 | pr_err("failed to register PCI driver, err %d\n", status); |
bb52f42a | 5822 | goto err_dest_lag_wq; |
940b61af | 5823 | } |
837f08fd | 5824 | |
bb52f42a DE |
5825 | return 0; |
5826 | ||
5827 | err_dest_lag_wq: | |
5828 | destroy_workqueue(ice_lag_wq); | |
96a9a934 | 5829 | ice_debugfs_exit(); |
bb52f42a DE |
5830 | err_dest_wq: |
5831 | destroy_workqueue(ice_wq); | |
837f08fd AV |
5832 | return status; |
5833 | } | |
5834 | module_init(ice_module_init); | |
5835 | ||
5836 | /** | |
5837 | * ice_module_exit - Driver exit cleanup routine | |
5838 | * | |
5839 | * ice_module_exit is called just before the driver is removed | |
5840 | * from memory. | |
5841 | */ | |
5842 | static void __exit ice_module_exit(void) | |
5843 | { | |
5844 | pci_unregister_driver(&ice_driver); | |
940b61af | 5845 | destroy_workqueue(ice_wq); |
bb52f42a | 5846 | destroy_workqueue(ice_lag_wq); |
837f08fd AV |
5847 | pr_info("module unloaded\n"); |
5848 | } | |
5849 | module_exit(ice_module_exit); | |
3a858ba3 | 5850 | |
e94d4478 | 5851 | /** |
f9867df6 | 5852 | * ice_set_mac_address - NDO callback to set MAC address |
e94d4478 AV |
5853 | * @netdev: network interface device structure |
5854 | * @pi: pointer to an address structure | |
5855 | * | |
5856 | * Returns 0 on success, negative on failure | |
5857 | */ | |
5858 | static int ice_set_mac_address(struct net_device *netdev, void *pi) | |
5859 | { | |
5860 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5861 | struct ice_vsi *vsi = np->vsi; | |
5862 | struct ice_pf *pf = vsi->back; | |
5863 | struct ice_hw *hw = &pf->hw; | |
5864 | struct sockaddr *addr = pi; | |
b357d971 | 5865 | u8 old_mac[ETH_ALEN]; |
e94d4478 | 5866 | u8 flags = 0; |
e94d4478 | 5867 | u8 *mac; |
2ccc1c1c | 5868 | int err; |
e94d4478 AV |
5869 | |
5870 | mac = (u8 *)addr->sa_data; | |
5871 | ||
5872 | if (!is_valid_ether_addr(mac)) | |
5873 | return -EADDRNOTAVAIL; | |
5874 | ||
7e408e07 | 5875 | if (test_bit(ICE_DOWN, pf->state) || |
5df7e45d | 5876 | ice_is_reset_in_progress(pf->state)) { |
e94d4478 AV |
5877 | netdev_err(netdev, "can't set mac %pM. device not ready\n", |
5878 | mac); | |
5879 | return -EBUSY; | |
5880 | } | |
5881 | ||
9fea7498 KP |
5882 | if (ice_chnl_dmac_fltr_cnt(pf)) { |
5883 | netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n", | |
5884 | mac); | |
5885 | return -EAGAIN; | |
5886 | } | |
5887 | ||
3ba7f53f | 5888 | netif_addr_lock_bh(netdev); |
b357d971 BC |
5889 | ether_addr_copy(old_mac, netdev->dev_addr); |
5890 | /* change the netdev's MAC address */ | |
a05e4c0a | 5891 | eth_hw_addr_set(netdev, mac); |
b357d971 BC |
5892 | netif_addr_unlock_bh(netdev); |
5893 | ||
757976ab | 5894 | /* Clean up old MAC filter. Not an error if old filter doesn't exist */ |
2ccc1c1c TN |
5895 | err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); |
5896 | if (err && err != -ENOENT) { | |
e94d4478 | 5897 | err = -EADDRNOTAVAIL; |
bbb968e8 | 5898 | goto err_update_filters; |
e94d4478 AV |
5899 | } |
5900 | ||
13ed5e8a | 5901 | /* Add filter for new MAC. If filter exists, return success */ |
2ccc1c1c | 5902 | err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); |
2c0069f3 | 5903 | if (err == -EEXIST) { |
13ed5e8a NN |
5904 | /* Although this MAC filter is already present in hardware it's |
5905 | * possible in some cases (e.g. bonding) that dev_addr was | |
5906 | * modified outside of the driver and needs to be restored back | |
5907 | * to this value. | |
5908 | */ | |
757976ab | 5909 | netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); |
2c0069f3 IV |
5910 | |
5911 | return 0; | |
5912 | } else if (err) { | |
3ba7f53f | 5913 | /* error if the new filter addition failed */ |
757976ab | 5914 | err = -EADDRNOTAVAIL; |
2c0069f3 | 5915 | } |
757976ab | 5916 | |
bbb968e8 | 5917 | err_update_filters: |
e94d4478 | 5918 | if (err) { |
2f2da36e | 5919 | netdev_err(netdev, "can't set MAC %pM. filter update failed\n", |
e94d4478 | 5920 | mac); |
b357d971 | 5921 | netif_addr_lock_bh(netdev); |
f3956ebb | 5922 | eth_hw_addr_set(netdev, old_mac); |
3ba7f53f | 5923 | netif_addr_unlock_bh(netdev); |
e94d4478 AV |
5924 | return err; |
5925 | } | |
5926 | ||
2f2da36e | 5927 | netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", |
e94d4478 AV |
5928 | netdev->dev_addr); |
5929 | ||
f9867df6 | 5930 | /* write new MAC address to the firmware */ |
e94d4478 | 5931 | flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; |
2ccc1c1c TN |
5932 | err = ice_aq_manage_mac_write(hw, mac, flags, NULL); |
5933 | if (err) { | |
5f87ec48 | 5934 | netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n", |
2ccc1c1c | 5935 | mac, err); |
e94d4478 AV |
5936 | } |
5937 | return 0; | |
5938 | } | |
5939 | ||
5940 | /** | |
5941 | * ice_set_rx_mode - NDO callback to set the netdev filters | |
5942 | * @netdev: network interface device structure | |
5943 | */ | |
5944 | static void ice_set_rx_mode(struct net_device *netdev) | |
5945 | { | |
5946 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5947 | struct ice_vsi *vsi = np->vsi; | |
5948 | ||
2571a3fa | 5949 | if (!vsi || ice_is_switchdev_running(vsi->back)) |
e94d4478 AV |
5950 | return; |
5951 | ||
5952 | /* Set the flags to synchronize filters | |
5953 | * ndo_set_rx_mode may be triggered even without a change in netdev | |
5954 | * flags | |
5955 | */ | |
e97fb1ae AV |
5956 | set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); |
5957 | set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); | |
e94d4478 AV |
5958 | set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); |
5959 | ||
5960 | /* schedule our worker thread which will take care of | |
5961 | * applying the new filter changes | |
5962 | */ | |
5963 | ice_service_task_schedule(vsi->back); | |
5964 | } | |
5965 | ||
1ddef455 UK |
5966 | /** |
5967 | * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate | |
5968 | * @netdev: network interface device structure | |
5969 | * @queue_index: Queue ID | |
5970 | * @maxrate: maximum bandwidth in Mbps | |
5971 | */ | |
5972 | static int | |
5973 | ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) | |
5974 | { | |
5975 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5976 | struct ice_vsi *vsi = np->vsi; | |
1ddef455 | 5977 | u16 q_handle; |
5518ac2a | 5978 | int status; |
1ddef455 UK |
5979 | u8 tc; |
5980 | ||
5981 | /* Validate maxrate requested is within permitted range */ | |
5982 | if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { | |
19cce2c6 | 5983 | netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", |
1ddef455 UK |
5984 | maxrate, queue_index); |
5985 | return -EINVAL; | |
5986 | } | |
5987 | ||
5988 | q_handle = vsi->tx_rings[queue_index]->q_handle; | |
5989 | tc = ice_dcb_get_tc(vsi, queue_index); | |
5990 | ||
479cdfe3 SS |
5991 | vsi = ice_locate_vsi_using_queue(vsi, queue_index); |
5992 | if (!vsi) { | |
5993 | netdev_err(netdev, "Invalid VSI for given queue %d\n", | |
5994 | queue_index); | |
5995 | return -EINVAL; | |
5996 | } | |
5997 | ||
1ddef455 UK |
5998 | /* Set BW back to default when user sets maxrate to 0 */ | |
5999 | if (!maxrate) | |
6000 | status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, | |
6001 | q_handle, ICE_MAX_BW); | |
6002 | else | |
6003 | status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, | |
6004 | q_handle, ICE_MAX_BW, maxrate * 1000); | |
c1484691 | 6005 | if (status) |
5f87ec48 TN |
6006 | netdev_err(netdev, "Unable to set Tx max rate, error %d\n", |
6007 | status); | |
1ddef455 | 6008 | |
c1484691 | 6009 | return status; |
1ddef455 UK |
6010 | } |
6011 | ||
e94d4478 AV |
6012 | /** |
6013 | * ice_fdb_add - add an entry to the hardware database | |
6014 | * @ndm: the input from the stack | |
6015 | * @tb: pointer to array of nladdr (unused) | |
6016 | * @dev: the net device pointer | |
6017 | * @addr: the MAC address entry being added | |
f9867df6 | 6018 | * @vid: VLAN ID |
e94d4478 | 6019 | * @flags: instructions from stack about fdb operation |
99be37ed | 6020 | * @extack: netlink extended ack |
e94d4478 | 6021 | */ |
99be37ed BA |
6022 | static int |
6023 | ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], | |
6024 | struct net_device *dev, const unsigned char *addr, u16 vid, | |
6025 | u16 flags, struct netlink_ext_ack __always_unused *extack) | |
e94d4478 AV |
6026 | { |
6027 | int err; | |
6028 | ||
6029 | if (vid) { | |
6030 | netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); | |
6031 | return -EINVAL; | |
6032 | } | |
6033 | if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { | |
6034 | netdev_err(dev, "FDB only supports static addresses\n"); | |
6035 | return -EINVAL; | |
6036 | } | |
6037 | ||
6038 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) | |
6039 | err = dev_uc_add_excl(dev, addr); | |
6040 | else if (is_multicast_ether_addr(addr)) | |
6041 | err = dev_mc_add_excl(dev, addr); | |
6042 | else | |
6043 | err = -EINVAL; | |
6044 | ||
6045 | /* Only return duplicate errors if NLM_F_EXCL is set */ | |
6046 | if (err == -EEXIST && !(flags & NLM_F_EXCL)) | |
6047 | err = 0; | |
6048 | ||
6049 | return err; | |
6050 | } | |
6051 | ||
6052 | /** | |
6053 | * ice_fdb_del - delete an entry from the hardware database | |
6054 | * @ndm: the input from the stack | |
6055 | * @tb: pointer to array of nladdr (unused) | |
6056 | * @dev: the net device pointer | |
6057 | * @addr: the MAC address entry being removed | |
f9867df6 | 6058 | * @vid: VLAN ID |
ca4567f1 | 6059 | * @extack: netlink extended ack |
e94d4478 | 6060 | */ |
c8b7abdd BA |
6061 | static int |
6062 | ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], | |
6063 | struct net_device *dev, const unsigned char *addr, | |
ca4567f1 | 6064 | __always_unused u16 vid, struct netlink_ext_ack *extack) |
e94d4478 AV |
6065 | { |
6066 | int err; | |
6067 | ||
6068 | if (ndm->ndm_state & NUD_PERMANENT) { | |
6069 | netdev_err(dev, "FDB only supports static addresses\n"); | |
6070 | return -EINVAL; | |
6071 | } | |
6072 | ||
6073 | if (is_unicast_ether_addr(addr)) | |
6074 | err = dev_uc_del(dev, addr); | |
6075 | else if (is_multicast_ether_addr(addr)) | |
6076 | err = dev_mc_del(dev, addr); | |
6077 | else | |
6078 | err = -EINVAL; | |
6079 | ||
6080 | return err; | |
6081 | } | |
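/* For reference, the two handlers above back the ndo_fdb_add/ndo_fdb_del
 * paths driven from userspace, e.g. via iproute2 (MAC and interface name
 * purely illustrative):
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 self permanent
 *
 * The "permanent" keyword maps to NUD_PERMANENT; as the check in
 * ice_fdb_add() shows, only static entries of that kind are accepted.
 */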
6082 | ||
1babaf77 BC |
6083 | #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ |
6084 | NETIF_F_HW_VLAN_CTAG_TX | \ | |
6085 | NETIF_F_HW_VLAN_STAG_RX | \ | |
6086 | NETIF_F_HW_VLAN_STAG_TX) | |
6087 | ||
affa1029 AG |
6088 | #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ |
6089 | NETIF_F_HW_VLAN_STAG_RX) | |
6090 | ||
1babaf77 BC |
6091 | #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ |
6092 | NETIF_F_HW_VLAN_STAG_FILTER) | |
6093 | ||
6094 | /** | |
6095 | * ice_fix_features - fix the netdev features flags based on device limitations | |
6096 | * @netdev: ptr to the netdev that flags are being fixed on | |
6097 | * @features: features that need to be checked and possibly fixed | |
6098 | * | |
6099 | * Make sure any fixups are made to features in this callback. This enables the | |
6100 | * driver to not have to check unsupported configurations throughout the driver | |
6101 | * because that's the responsibility of this callback. | |
6102 | * | |
6103 | * Single VLAN Mode (SVM) Supported Features: | |
6104 | * NETIF_F_HW_VLAN_CTAG_FILTER | |
6105 | * NETIF_F_HW_VLAN_CTAG_RX | |
6106 | * NETIF_F_HW_VLAN_CTAG_TX | |
6107 | * | |
6108 | * Double VLAN Mode (DVM) Supported Features: | |
6109 | * NETIF_F_HW_VLAN_CTAG_FILTER | |
6110 | * NETIF_F_HW_VLAN_CTAG_RX | |
6111 | * NETIF_F_HW_VLAN_CTAG_TX | |
6112 | * | |
6113 | * NETIF_F_HW_VLAN_STAG_FILTER | |
6114 | * NETIF_F_HW_VLAN_STAG_RX | |
6115 | * NETIF_F_HW_VLAN_STAG_TX | |
6116 | * | |
6117 | * Features that need fixing: | |
6118 | * Cannot simultaneously enable CTAG and STAG stripping and/or insertion. | |
6119 | * These are mutually exclusive as the VSI context cannot support multiple | |
6120 | * VLAN ethertypes simultaneously for stripping and/or insertion. If this | |
6121 | * is not done, then default to clearing the requested STAG offload | |
6122 | * settings. | |
6123 | * | |
6124 | * All supported filtering has to be enabled or disabled together. For | |
6125 | * example, in DVM, CTAG and STAG filtering have to be enabled and disabled | |
6126 | * together. If this is not done, then default to VLAN filtering disabled. | |
6127 | * These are mutually exclusive as there is currently no way to | |
6128 | * enable/disable VLAN filtering based on VLAN ethertype when using VLAN | |
6129 | * prune rules. | |
6130 | */ | |
6131 | static netdev_features_t | |
6132 | ice_fix_features(struct net_device *netdev, netdev_features_t features) | |
6133 | { | |
6134 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
9542ef4f RS |
6135 | netdev_features_t req_vlan_fltr, cur_vlan_fltr; |
6136 | bool cur_ctag, cur_stag, req_ctag, req_stag; | |
6137 | ||
6138 | cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; | |
6139 | cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; | |
6140 | cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; | |
6141 | ||
6142 | req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; | |
6143 | req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; | |
6144 | req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; | |
6145 | ||
6146 | if (req_vlan_fltr != cur_vlan_fltr) { | |
6147 | if (ice_is_dvm_ena(&np->vsi->back->hw)) { | |
6148 | if (req_ctag && req_stag) { | |
6149 | features |= NETIF_VLAN_FILTERING_FEATURES; | |
6150 | } else if (!req_ctag && !req_stag) { | |
6151 | features &= ~NETIF_VLAN_FILTERING_FEATURES; | |
6152 | } else if ((!cur_ctag && req_ctag && !cur_stag) || | |
6153 | (!cur_stag && req_stag && !cur_ctag)) { | |
6154 | features |= NETIF_VLAN_FILTERING_FEATURES; | |
6155 | netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); | |
6156 | } else if ((cur_ctag && !req_ctag && cur_stag) || | |
6157 | (cur_stag && !req_stag && cur_ctag)) { | |
6158 | features &= ~NETIF_VLAN_FILTERING_FEATURES; | |
6159 | netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); | |
6160 | } | |
1babaf77 | 6161 | } else { |
9542ef4f RS |
6162 | if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) |
6163 | netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); | |
6164 | ||
6165 | if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) | |
6166 | features |= NETIF_F_HW_VLAN_CTAG_FILTER; | |
1babaf77 BC |
6167 | } |
6168 | } | |
6169 | ||
6170 | if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) && | |
6171 | (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { | |
6172 | netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n"); | |
6173 | features &= ~(NETIF_F_HW_VLAN_STAG_RX | | |
6174 | NETIF_F_HW_VLAN_STAG_TX); | |
6175 | } | |
6176 | ||
affa1029 AG |
6177 | if (!(netdev->features & NETIF_F_RXFCS) && |
6178 | (features & NETIF_F_RXFCS) && | |
6179 | (features & NETIF_VLAN_STRIPPING_FEATURES) && | |
6180 | !ice_vsi_has_non_zero_vlans(np->vsi)) { | |
6181 | netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); | |
6182 | features &= ~NETIF_VLAN_STRIPPING_FEATURES; | |
6183 | } | |
6184 | ||
1babaf77 BC |
6185 | return features; |
6186 | } | |
6187 | ||
714ed949 LZ |
6188 | /** |
6189 | * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto | |
6190 | * @vsi: PF's VSI | |
6191 | * @vlan_ethertype: VLAN ethertype (802.1Q or 802.1ad) in network byte order | |
6192 | * | |
6193 | * Store current stripped VLAN proto in ring packet context, | |
6194 | * so it can be accessed more efficiently by packet processing code. | |
6195 | */ | |
6196 | static void | |
6197 | ice_set_rx_rings_vlan_proto(struct ice_vsi *vsi, __be16 vlan_ethertype) | |
6198 | { | |
6199 | u16 i; | |
6200 | ||
6201 | ice_for_each_alloc_rxq(vsi, i) | |
6202 | vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; | |
6203 | } | |
6204 | ||
1babaf77 BC |
6205 | /** |
6206 | * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI | |
6207 | * @vsi: PF's VSI | |
6208 | * @features: features used to determine VLAN offload settings | |
6209 | * | |
6210 | * First, determine the vlan_ethertype based on the VLAN offload bits in | |
6211 | * features. Then determine if stripping and insertion should be enabled or | |
6212 | * disabled. Finally enable or disable VLAN stripping and insertion. | |
6213 | */ | |
6214 | static int | |
6215 | ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features) | |
6216 | { | |
6217 | bool enable_stripping = true, enable_insertion = true; | |
6218 | struct ice_vsi_vlan_ops *vlan_ops; | |
6219 | int strip_err = 0, insert_err = 0; | |
6220 | u16 vlan_ethertype = 0; | |
6221 | ||
6222 | vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); | |
6223 | ||
6224 | if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) | |
6225 | vlan_ethertype = ETH_P_8021AD; | |
6226 | else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) | |
6227 | vlan_ethertype = ETH_P_8021Q; | |
6228 | ||
6229 | if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX))) | |
6230 | enable_stripping = false; | |
6231 | if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX))) | |
6232 | enable_insertion = false; | |
6233 | ||
6234 | if (enable_stripping) | |
6235 | strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); | |
6236 | else | |
6237 | strip_err = vlan_ops->dis_stripping(vsi); | |
6238 | ||
6239 | if (enable_insertion) | |
6240 | insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); | |
6241 | else | |
6242 | insert_err = vlan_ops->dis_insertion(vsi); | |
6243 | ||
6244 | if (strip_err || insert_err) | |
6245 | return -EIO; | |
6246 | ||
714ed949 LZ |
6247 | ice_set_rx_rings_vlan_proto(vsi, enable_stripping ? |
6248 | htons(vlan_ethertype) : 0); | |
6249 | ||
1babaf77 BC |
6250 | return 0; |
6251 | } | |
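/* Decision summary for the helper above, restated from its checks
 * (ethertype values are the standard ETH_P_* constants):
 *
 *	STAG_RX or STAG_TX requested -> vlan_ethertype = ETH_P_8021AD (0x88A8)
 *	else CTAG_RX or CTAG_TX      -> vlan_ethertype = ETH_P_8021Q (0x8100)
 *	no RX offload bit requested  -> stripping disabled
 *	no TX offload bit requested  -> insertion disabled
 */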
6252 | ||
6253 | /** | |
6254 | * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI | |
6255 | * @vsi: PF's VSI | |
6256 | * @features: features used to determine VLAN filtering settings | |
6257 | * | |
6258 | * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the | |
6259 | * features. | |
6260 | */ | |
6261 | static int | |
6262 | ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) | |
6263 | { | |
6264 | struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi); | |
6265 | int err = 0; | |
6266 | ||
6267 | /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking | |
6268 | * if either bit is set | |
6269 | */ | |
6270 | if (features & | |
6271 | (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) | |
6272 | err = vlan_ops->ena_rx_filtering(vsi); | |
6273 | else | |
6274 | err = vlan_ops->dis_rx_filtering(vsi); | |
6275 | ||
6276 | return err; | |
6277 | } | |
6278 | ||
6279 | /** | |
6280 | * ice_set_vlan_features - set VLAN settings based on suggested feature set | |
6281 | * @netdev: ptr to the netdev being adjusted | |
6282 | * @features: the feature set that the stack is suggesting | |
6283 | * | |
6284 | * Only update VLAN settings if the requested_vlan_features are different than | |
6285 | * the current_vlan_features. | |
6286 | */ | |
6287 | static int | |
6288 | ice_set_vlan_features(struct net_device *netdev, netdev_features_t features) | |
6289 | { | |
6290 | netdev_features_t current_vlan_features, requested_vlan_features; | |
6291 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
6292 | struct ice_vsi *vsi = np->vsi; | |
6293 | int err; | |
6294 | ||
6295 | current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; | |
6296 | requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; | |
6297 | if (current_vlan_features ^ requested_vlan_features) { | |
affa1029 AG |
6298 | if ((features & NETIF_F_RXFCS) && |
6299 | (features & NETIF_VLAN_STRIPPING_FEATURES)) { | |
6300 | dev_err(ice_pf_to_dev(vsi->back), | |
6301 | "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); | |
6302 | return -EIO; | |
6303 | } | |
6304 | ||
1babaf77 BC |
6305 | err = ice_set_vlan_offload_features(vsi, features); |
6306 | if (err) | |
6307 | return err; | |
6308 | } | |
6309 | ||
6310 | current_vlan_features = netdev->features & | |
6311 | NETIF_VLAN_FILTERING_FEATURES; | |
6312 | requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES; | |
6313 | if (current_vlan_features ^ requested_vlan_features) { | |
6314 | err = ice_set_vlan_filtering_features(vsi, features); | |
6315 | if (err) | |
6316 | return err; | |
6317 | } | |
6318 | ||
6319 | return 0; | |
6320 | } | |
6321 | ||
44ece4e1 MF |
6322 | /** |
6323 | * ice_set_loopback - turn on/off loopback mode on underlying PF | |
6324 | * @vsi: ptr to VSI | |
6325 | * @ena: flag to indicate the on/off setting | |
6326 | */ | |
6327 | static int ice_set_loopback(struct ice_vsi *vsi, bool ena) | |
6328 | { | |
6329 | bool if_running = netif_running(vsi->netdev); | |
6330 | int ret; | |
6331 | ||
6332 | if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { | |
6333 | ret = ice_down(vsi); | |
6334 | if (ret) { | |
6335 | netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); | |
6336 | return ret; | |
6337 | } | |
6338 | } | |
6339 | ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); | |
6340 | if (ret) | |
6341 | netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); | |
6342 | if (if_running) | |
6343 | ret = ice_up(vsi); | |
6344 | ||
6345 | return ret; | |
6346 | } | |
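/* NETIF_F_LOOPBACK is normally toggled from userspace with ethtool, e.g.
 * "ethtool -K eth0 loopback on" (interface name illustrative); the stack
 * then calls ice_set_features(), which reaches the helper above. If the
 * interface was running, the helper brings it down around the admin queue
 * call and back up afterwards.
 */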
6347 | ||
d76a60ba AV |
6348 | /** |
6349 | * ice_set_features - set the netdev feature flags | |
6350 | * @netdev: ptr to the netdev being adjusted | |
6351 | * @features: the feature set that the stack is suggesting | |
6352 | */ | |
c8b7abdd BA |
6353 | static int |
6354 | ice_set_features(struct net_device *netdev, netdev_features_t features) | |
d76a60ba | 6355 | { |
c67672fa | 6356 | netdev_features_t changed = netdev->features ^ features; |
d76a60ba AV |
6357 | struct ice_netdev_priv *np = netdev_priv(netdev); |
6358 | struct ice_vsi *vsi = np->vsi; | |
5f8cc355 | 6359 | struct ice_pf *pf = vsi->back; |
d76a60ba AV |
6360 | int ret = 0; |
6361 | ||
462acf6a | 6362 | /* Don't set any netdev advanced features with device in Safe Mode */ |
c67672fa MF |
6363 | if (ice_is_safe_mode(pf)) { |
6364 | dev_err(ice_pf_to_dev(pf), | |
6365 | "Device is in Safe Mode - not enabling advanced netdev features\n"); | |
462acf6a TN |
6366 | return ret; |
6367 | } | |
6368 | ||
5f8cc355 HT |
6369 | /* Do not change settings during reset */ | |
6370 | if (ice_is_reset_in_progress(pf->state)) { | |
c67672fa MF |
6371 | dev_err(ice_pf_to_dev(pf), |
6372 | "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); | |
5f8cc355 HT |
6373 | return -EBUSY; |
6374 | } | |
6375 | ||
8f529ff9 TN |
6376 | /* Multiple features can be changed in one call so keep features in |
6377 | * separate if/else statements to guarantee each feature is checked | |
6378 | */ | |
c67672fa MF |
6379 | if (changed & NETIF_F_RXHASH) |
6380 | ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH)); | |
492af0ab | 6381 | |
1babaf77 BC |
6382 | ret = ice_set_vlan_features(netdev, features); |
6383 | if (ret) | |
6384 | return ret; | |
3171948e | 6385 | |
dddd406d JB |
6386 | /* Turn on receive of FCS aka CRC, and after setting this |
6387 | * flag the packet data will have the 4 byte CRC appended | |
6388 | */ | |
6389 | if (changed & NETIF_F_RXFCS) { | |
affa1029 AG |
6390 | if ((features & NETIF_F_RXFCS) && |
6391 | (features & NETIF_VLAN_STRIPPING_FEATURES)) { | |
6392 | dev_err(ice_pf_to_dev(vsi->back), | |
6393 | "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); | |
6394 | return -EIO; | |
6395 | } | |
6396 | ||
dddd406d JB |
6397 | ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); |
6398 | ret = ice_down_up(vsi); | |
6399 | if (ret) | |
6400 | return ret; | |
6401 | } | |
6402 | ||
c67672fa MF |
6403 | if (changed & NETIF_F_NTUPLE) { |
6404 | bool ena = !!(features & NETIF_F_NTUPLE); | |
6405 | ||
6406 | ice_vsi_manage_fdir(vsi, ena); | |
6407 | ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi); | |
28bf2672 | 6408 | } |
148beb61 | 6409 | |
fbc7b27a KP |
6410 | /* don't turn off hw_tc_offload when ADQ is already enabled */ |
6411 | if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) { | |
6412 | dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n"); | |
6413 | return -EACCES; | |
6414 | } | |
9fea7498 | 6415 | |
c67672fa MF |
6416 | if (changed & NETIF_F_HW_TC) { |
6417 | bool ena = !!(features & NETIF_F_HW_TC); | |
9fea7498 | 6418 | |
c67672fa MF |
6419 | ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : |
6420 | clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); | |
6421 | } | |
9fea7498 | 6422 | |
44ece4e1 MF |
6423 | if (changed & NETIF_F_LOOPBACK) |
6424 | ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); | |
6425 | ||
6426 | return ret; | |
d76a60ba AV |
6427 | } |
6428 | ||
6429 | /** | |
c31af68a | 6430 | * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI |
f9867df6 | 6431 | * @vsi: VSI to setup VLAN properties for |
d76a60ba AV |
6432 | */ |
6433 | static int ice_vsi_vlan_setup(struct ice_vsi *vsi) | |
6434 | { | |
1babaf77 | 6435 | int err; |
d76a60ba | 6436 | |
1babaf77 BC |
6437 | err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); |
6438 | if (err) | |
6439 | return err; | |
d76a60ba | 6440 | |
1babaf77 BC |
6441 | err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); |
6442 | if (err) | |
6443 | return err; | |
d76a60ba | 6444 | |
c31af68a | 6445 | return ice_vsi_add_vlan_zero(vsi); |
d76a60ba AV |
6446 | } |
6447 | ||
cdedef59 | 6448 | /** |
0db66d20 | 6449 | * ice_vsi_cfg_lan - Setup the VSI lan related config |
cdedef59 AV |
6450 | * @vsi: the VSI being configured |
6451 | * | |
6452 | * Return 0 on success and negative value on error | |
6453 | */ | |
0db66d20 | 6454 | int ice_vsi_cfg_lan(struct ice_vsi *vsi) |
cdedef59 AV |
6455 | { |
6456 | int err; | |
6457 | ||
6a8d013e | 6458 | if (vsi->netdev && vsi->type == ICE_VSI_PF) { |
c7f2c42b | 6459 | ice_set_rx_mode(vsi->netdev); |
9ecd25c2 | 6460 | |
6a8d013e JB |
6461 | err = ice_vsi_vlan_setup(vsi); |
6462 | if (err) | |
6463 | return err; | |
c7f2c42b | 6464 | } |
a629cf0a | 6465 | ice_vsi_cfg_dcb_rings(vsi); |
03f7a986 AV |
6466 | |
6467 | err = ice_vsi_cfg_lan_txqs(vsi); | |
efc2214b MF |
6468 | if (!err && ice_is_xdp_ena_vsi(vsi)) |
6469 | err = ice_vsi_cfg_xdp_txqs(vsi); | |
cdedef59 AV |
6470 | if (!err) |
6471 | err = ice_vsi_cfg_rxqs(vsi); | |
6472 | ||
6473 | return err; | |
6474 | } | |
6475 | ||
cdf1f1f1 | 6476 | /* THEORY OF MODERATION: |
d8eb7ad5 | 6477 | * The ice driver hardware works differently than the hardware that DIMLIB was |
cdf1f1f1 JK |
6478 | * originally made for. ice hardware doesn't have packet count limits that |
6479 | * can trigger an interrupt, but it *does* have interrupt rate limit support, | |
d8eb7ad5 JB |
6480 | * which is hard-coded to a limit of 250,000 ints/second. |
6481 | * If not using dynamic moderation, the INTRL value can be modified | |
6482 | * by ethtool rx-usecs-high. | |
cdf1f1f1 JK |
6483 | */ |
6484 | struct ice_dim { | |
6485 | /* the throttle rate for interrupts, basically worst case delay before | |
6486 | * an initial interrupt fires, value is stored in microseconds. | |
6487 | */ | |
6488 | u16 itr; | |
cdf1f1f1 JK |
6489 | }; |
6490 | ||
6491 | /* Make a different profile for Rx that doesn't allow quite so aggressive | |
d8eb7ad5 JB |
6492 | * moderation at the high end (it maxes out at 126us or about 8k interrupts a |
6493 | * second). | |
cdf1f1f1 JK |
6494 | */ |
6495 | static const struct ice_dim rx_profile[] = { | |
d8eb7ad5 JB |
6496 | {2}, /* 500,000 ints/s, capped at 250K by INTRL */ |
6497 | {8}, /* 125,000 ints/s */ | |
6498 | {16}, /* 62,500 ints/s */ | |
6499 | {62}, /* 16,129 ints/s */ | |
6500 | {126} /* 7,936 ints/s */ | |
cdf1f1f1 JK |
6501 | }; |
6502 | ||
6503 | /* The transmit profile, which has the same sorts of values | |
6504 | * as the previous struct | |
6505 | */ | |
6506 | static const struct ice_dim tx_profile[] = { | |
d8eb7ad5 JB |
6507 | {2}, /* 500,000 ints/s, capped at 250K by INTRL */ |
6508 | {8}, /* 125,000 ints/s */ | |
6509 | {40}, /* 25,000 ints/s */ | |
6510 | {128}, /* 7,812 ints/s */ | |
6511 | {256} /* 3,906 ints/s */ | |
cdf1f1f1 JK |
6512 | }; |
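/* A quick sanity check on the two tables above: each ITR entry is a delay
 * in microseconds, so the approximate interrupt rate of an entry is
 *
 *	rate = 1,000,000 / itr		(ints/s)
 *
 * e.g. 8 us -> 125,000 ints/s and 126 us -> ~7,936 ints/s. The 2 us
 * entries would work out to 500,000 ints/s, but as the theory comment
 * above notes, the INTRL rate limit caps delivery at 250,000 ints/s.
 */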
6513 | ||
6514 | static void ice_tx_dim_work(struct work_struct *work) | |
6515 | { | |
6516 | struct ice_ring_container *rc; | |
cdf1f1f1 | 6517 | struct dim *dim; |
d8eb7ad5 | 6518 | u16 itr; |
cdf1f1f1 JK |
6519 | |
6520 | dim = container_of(work, struct dim, work); | |
c59cc267 | 6521 | rc = dim->priv; |
cdf1f1f1 | 6522 | |
d8eb7ad5 | 6523 | WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); |
cdf1f1f1 JK |
6524 | |
6525 | /* look up the values in our local table */ | |
6526 | itr = tx_profile[dim->profile_ix].itr; | |
cdf1f1f1 | 6527 | |
d8eb7ad5 | 6528 | ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim); |
cdf1f1f1 | 6529 | ice_write_itr(rc, itr); |
cdf1f1f1 JK |
6530 | |
6531 | dim->state = DIM_START_MEASURE; | |
6532 | } | |
6533 | ||
6534 | static void ice_rx_dim_work(struct work_struct *work) | |
6535 | { | |
6536 | struct ice_ring_container *rc; | |
cdf1f1f1 | 6537 | struct dim *dim; |
d8eb7ad5 | 6538 | u16 itr; |
cdf1f1f1 JK |
6539 | |
6540 | dim = container_of(work, struct dim, work); | |
c59cc267 | 6541 | rc = dim->priv; |
cdf1f1f1 | 6542 | |
d8eb7ad5 | 6543 | WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); |
cdf1f1f1 JK |
6544 | |
6545 | /* look up the values in our local table */ | |
6546 | itr = rx_profile[dim->profile_ix].itr; | |
cdf1f1f1 | 6547 | |
d8eb7ad5 | 6548 | ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim); |
cdf1f1f1 | 6549 | ice_write_itr(rc, itr); |
cdf1f1f1 JK |
6550 | |
6551 | dim->state = DIM_START_MEASURE; | |
6552 | } | |
6553 | ||
d8eb7ad5 JB |
6554 | #define ICE_DIM_DEFAULT_PROFILE_IX 1 |
6555 | ||
6556 | /** | |
6557 | * ice_init_moderation - set up interrupt moderation | |
6558 | * @q_vector: the vector containing rings to be configured | |
6559 | * | |
6560 | * Set up interrupt moderation registers, with the intent to do the right thing | |
6561 | * whether called from reset or from probe, and whether or not dynamic | |
6562 | * moderation is enabled. Take special care to write all the registers in both | |
6563 | * the dynamic and non-dynamic cases in order to make sure hardware is in a | |
6564 | * known state. | |
6565 | */ | |
6566 | static void ice_init_moderation(struct ice_q_vector *q_vector) | |
6567 | { | |
6568 | struct ice_ring_container *rc; | |
6569 | bool tx_dynamic, rx_dynamic; | |
6570 | ||
6571 | rc = &q_vector->tx; | |
6572 | INIT_WORK(&rc->dim.work, ice_tx_dim_work); | |
6573 | rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; | |
6574 | rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; | |
6575 | rc->dim.priv = rc; | |
6576 | tx_dynamic = ITR_IS_DYNAMIC(rc); | |
6577 | ||
6578 | /* set the initial TX ITR to match the above */ | |
6579 | ice_write_itr(rc, tx_dynamic ? | |
6580 | tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); | |
6581 | ||
6582 | rc = &q_vector->rx; | |
6583 | INIT_WORK(&rc->dim.work, ice_rx_dim_work); | |
6584 | rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; | |
6585 | rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; | |
6586 | rc->dim.priv = rc; | |
6587 | rx_dynamic = ITR_IS_DYNAMIC(rc); | |
6588 | ||
6589 | /* set the initial RX ITR to match the above */ | |
6590 | ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr : | |
6591 | rc->itr_setting); | |
6592 | ||
6593 | ice_set_q_vector_intrl(q_vector); | |
6594 | } | |
6595 | ||
2b245cb2 AV |
6596 | /** |
6597 | * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI | |
6598 | * @vsi: the VSI being configured | |
6599 | */ | |
6600 | static void ice_napi_enable_all(struct ice_vsi *vsi) | |
6601 | { | |
6602 | int q_idx; | |
6603 | ||
6604 | if (!vsi->netdev) | |
6605 | return; | |
6606 | ||
b4603dbf | 6607 | ice_for_each_q_vector(vsi, q_idx) { |
eec90376 YX |
6608 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
6609 | ||
d8eb7ad5 | 6610 | ice_init_moderation(q_vector); |
cdf1f1f1 | 6611 | |
e72bba21 | 6612 | if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) |
eec90376 YX |
6613 | napi_enable(&q_vector->napi); |
6614 | } | |
2b245cb2 AV |
6615 | } |
6616 | ||
cdedef59 AV |
6617 | /** |
6618 | * ice_up_complete - Finish the last steps of bringing up a connection | |
6619 | * @vsi: The VSI being configured | |
6620 | * | |
6621 | * Return 0 on success and negative value on error | |
6622 | */ | |
6623 | static int ice_up_complete(struct ice_vsi *vsi) | |
6624 | { | |
6625 | struct ice_pf *pf = vsi->back; | |
6626 | int err; | |
6627 | ||
ba880734 | 6628 | ice_vsi_cfg_msix(vsi); |
cdedef59 AV |
6629 | |
6630 | /* Enable only Rx rings, Tx rings were enabled by the FW when the | |
6631 | * Tx queue group list was configured and the context bits were | |
6632 | * programmed using ice_vsi_cfg_txqs | |
6633 | */ | |
13a6233b | 6634 | err = ice_vsi_start_all_rx_rings(vsi); |
cdedef59 AV |
6635 | if (err) |
6636 | return err; | |
6637 | ||
e97fb1ae | 6638 | clear_bit(ICE_VSI_DOWN, vsi->state); |
2b245cb2 | 6639 | ice_napi_enable_all(vsi); |
cdedef59 AV |
6640 | ice_vsi_ena_irq(vsi); |
6641 | ||
6642 | if (vsi->port_info && | |
6643 | (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && | |
6a8d013e | 6644 | vsi->netdev && vsi->type == ICE_VSI_PF) { |
cdedef59 AV |
6645 | ice_print_link_msg(vsi, true); |
6646 | netif_tx_start_all_queues(vsi->netdev); | |
6647 | netif_carrier_on(vsi->netdev); | |
6b1ff5d3 | 6648 | ice_ptp_link_change(pf, pf->hw.pf_id, true); |
cdedef59 AV |
6649 | } |
6650 | ||
31b6298f PG |
6651 | /* Perform an initial read of the statistics registers now to |
6652 | * set the baseline so counters are ready when interface is up | |
6653 | */ | |
6654 | ice_update_eth_stats(vsi); | |
6a8d013e JB |
6655 | |
6656 | if (vsi->type == ICE_VSI_PF) | |
6657 | ice_service_task_schedule(pf); | |
cdedef59 | 6658 | |
1b5c19c7 | 6659 | return 0; |
cdedef59 AV |
6660 | } |
6661 | ||
fcea6f3d AV |
6662 | /** |
6663 | * ice_up - Bring the connection back up after being down | |
6664 | * @vsi: VSI being configured | |
6665 | */ | |
6666 | int ice_up(struct ice_vsi *vsi) | |
6667 | { | |
6668 | int err; | |
6669 | ||
0db66d20 | 6670 | err = ice_vsi_cfg_lan(vsi); |
fcea6f3d AV |
6671 | if (!err) |
6672 | err = ice_up_complete(vsi); | |
6673 | ||
6674 | return err; | |
6675 | } | |
6676 | ||
6677 | /** | |
6678 | * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring | |
e72bba21 MF |
6679 | * @syncp: pointer to u64_stats_sync |
6680 | * @stats: stats that pkts and bytes count will be taken from | |
fcea6f3d AV |
6681 | * @pkts: packets stats counter |
6682 | * @bytes: bytes stats counter | |
6683 | * | |
6684 | * This function fetches stats from the ring considering the atomic operations | |
6685 | * that need to be performed to read u64 values on a 32-bit machine. | |
6686 | */ | |
c8ff29b5 MS |
6687 | void |
6688 | ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, | |
6689 | struct ice_q_stats stats, u64 *pkts, u64 *bytes) | |
fcea6f3d AV |
6690 | { |
6691 | unsigned int start; | |
fcea6f3d | 6692 | |
fcea6f3d | 6693 | do { |
068c38ad | 6694 | start = u64_stats_fetch_begin(syncp); |
e72bba21 MF |
6695 | *pkts = stats.pkts; |
6696 | *bytes = stats.bytes; | |
068c38ad | 6697 | } while (u64_stats_fetch_retry(syncp, start)); |
fcea6f3d AV |
6698 | } |
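/* Worth noting (generic u64_stats_sync behavior, not ice-specific): on
 * 64-bit kernels u64_stats_fetch_begin()/_retry() reduce to a single
 * pass through the loop above; only 32-bit kernels spin on the writer's
 * seqcount so the two 32-bit halves of each counter are read consistently.
 */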
6699 | ||
49d358e0 MP |
6700 | /** |
6701 | * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters | |
6702 | * @vsi: the VSI to be updated | |
1a0f25a5 | 6703 | * @vsi_stats: the stats struct to be updated |
49d358e0 MP |
6704 | * @rings: rings to work on |
6705 | * @count: number of rings | |
6706 | */ | |
6707 | static void | |
1a0f25a5 JB |
6708 | ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, |
6709 | struct rtnl_link_stats64 *vsi_stats, | |
6710 | struct ice_tx_ring **rings, u16 count) | |
49d358e0 | 6711 | { |
49d358e0 MP |
6712 | u16 i; |
6713 | ||
6714 | for (i = 0; i < count; i++) { | |
e72bba21 MF |
6715 | struct ice_tx_ring *ring; |
6716 | u64 pkts = 0, bytes = 0; | |
49d358e0 MP |
6717 | |
6718 | ring = READ_ONCE(rings[i]); | |
288ecf49 | 6719 | if (!ring || !ring->ring_stats) |
f1535469 | 6720 | continue; |
288ecf49 BM |
6721 | ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, |
6722 | ring->ring_stats->stats, &pkts, | |
6723 | &bytes); | |
49d358e0 MP |
6724 | vsi_stats->tx_packets += pkts; |
6725 | vsi_stats->tx_bytes += bytes; | |
288ecf49 BM |
6726 | vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; |
6727 | vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; | |
6728 | vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; | |
49d358e0 MP |
6729 | } |
6730 | } | |
6731 | ||
fcea6f3d AV |
6732 | /** |
6733 | * ice_update_vsi_ring_stats - Update VSI stats counters | |
6734 | * @vsi: the VSI to be updated | |
6735 | */ | |
6736 | static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) | |
6737 | { | |
2fd5e433 | 6738 | struct rtnl_link_stats64 *net_stats, *stats_prev; |
1a0f25a5 | 6739 | struct rtnl_link_stats64 *vsi_stats; |
fcea6f3d AV |
6740 | u64 pkts, bytes; |
6741 | int i; | |
6742 | ||
1a0f25a5 JB |
6743 | vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC); |
6744 | if (!vsi_stats) | |
6745 | return; | |
fcea6f3d AV |
6746 | |
6747 | /* reset non-netdev (extended) stats */ | |
6748 | vsi->tx_restart = 0; | |
6749 | vsi->tx_busy = 0; | |
6750 | vsi->tx_linearize = 0; | |
6751 | vsi->rx_buf_failed = 0; | |
6752 | vsi->rx_page_failed = 0; | |
6753 | ||
6754 | rcu_read_lock(); | |
6755 | ||
6756 | /* update Tx rings counters */ | |
1a0f25a5 JB |
6757 | ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, |
6758 | vsi->num_txq); | |
fcea6f3d AV |
6759 | |
6760 | /* update Rx rings counters */ | |
6761 | ice_for_each_rxq(vsi, i) { | |
e72bba21 | 6762 | struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); |
288ecf49 | 6763 | struct ice_ring_stats *ring_stats; |
b6b0501d | 6764 | |
288ecf49 BM |
6765 | ring_stats = ring->ring_stats; |
6766 | ice_fetch_u64_stats_per_ring(&ring_stats->syncp, | |
6767 | ring_stats->stats, &pkts, | |
6768 | &bytes); | |
fcea6f3d AV |
6769 | vsi_stats->rx_packets += pkts; |
6770 | vsi_stats->rx_bytes += bytes; | |
288ecf49 BM |
6771 | vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; |
6772 | vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; | |
fcea6f3d AV |
6773 | } |
6774 | ||
49d358e0 MP |
6775 | /* update XDP Tx rings counters */ |
6776 | if (ice_is_xdp_ena_vsi(vsi)) | |
1a0f25a5 | 6777 | ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, |
49d358e0 MP |
6778 | vsi->num_xdp_txq); |
6779 | ||
fcea6f3d | 6780 | rcu_read_unlock(); |
1a0f25a5 | 6781 | |
2fd5e433 BM |
6782 | net_stats = &vsi->net_stats; |
6783 | stats_prev = &vsi->net_stats_prev; | |
6784 | ||
6785 | /* clear prev counters after reset */ | |
6786 | if (vsi_stats->tx_packets < stats_prev->tx_packets || | |
6787 | vsi_stats->rx_packets < stats_prev->rx_packets) { | |
6788 | stats_prev->tx_packets = 0; | |
6789 | stats_prev->tx_bytes = 0; | |
6790 | stats_prev->rx_packets = 0; | |
6791 | stats_prev->rx_bytes = 0; | |
6792 | } | |
6793 | ||
6794 | /* update netdev counters */ | |
6795 | net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; | |
6796 | net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; | |
6797 | net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; | |
6798 | net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; | |
6799 | ||
6800 | stats_prev->tx_packets = vsi_stats->tx_packets; | |
6801 | stats_prev->tx_bytes = vsi_stats->tx_bytes; | |
6802 | stats_prev->rx_packets = vsi_stats->rx_packets; | |
6803 | stats_prev->rx_bytes = vsi_stats->rx_bytes; | |
1a0f25a5 JB |
6804 | |
6805 | kfree(vsi_stats); | |
fcea6f3d AV |
6806 | } |
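/* The prev/delta bookkeeping above keeps vsi->net_stats monotonic across
 * VSI rebuilds: ring counters restart from zero after a reset, so only
 * the growth since the last snapshot is folded into the netdev counters,
 * and the snapshot itself is zeroed whenever the fresh totals drop below
 * it (the telltale sign that the rings were reallocated).
 */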
6807 | ||
6808 | /** | |
6809 | * ice_update_vsi_stats - Update VSI stats counters | |
6810 | * @vsi: the VSI to be updated | |
6811 | */ | |
5a4a8673 | 6812 | void ice_update_vsi_stats(struct ice_vsi *vsi) |
fcea6f3d AV |
6813 | { |
6814 | struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; | |
6815 | struct ice_eth_stats *cur_es = &vsi->eth_stats; | |
6816 | struct ice_pf *pf = vsi->back; | |
6817 | ||
e97fb1ae | 6818 | if (test_bit(ICE_VSI_DOWN, vsi->state) || |
7e408e07 | 6819 | test_bit(ICE_CFG_BUSY, pf->state)) |
fcea6f3d AV |
6820 | return; |
6821 | ||
6822 | /* get stats as recorded by Tx/Rx rings */ | |
6823 | ice_update_vsi_ring_stats(vsi); | |
6824 | ||
6825 | /* get VSI stats as recorded by the hardware */ | |
6826 | ice_update_eth_stats(vsi); | |
6827 | ||
6828 | cur_ns->tx_errors = cur_es->tx_errors; | |
51fe27e1 | 6829 | cur_ns->rx_dropped = cur_es->rx_discards; |
fcea6f3d AV |
6830 | cur_ns->tx_dropped = cur_es->tx_discards; |
6831 | cur_ns->multicast = cur_es->rx_multicast; | |
6832 | ||
6833 | /* update some more netdev stats if this is main VSI */ | |
6834 | if (vsi->type == ICE_VSI_PF) { | |
6835 | cur_ns->rx_crc_errors = pf->stats.crc_errors; | |
6836 | cur_ns->rx_errors = pf->stats.crc_errors + | |
4f1fe43c | 6837 | pf->stats.illegal_bytes + |
4f1fe43c BC |
6838 | pf->stats.rx_undersize + |
6839 | pf->hw_csum_rx_error + | |
6840 | pf->stats.rx_jabber + | |
6841 | pf->stats.rx_fragments + | |
6842 | pf->stats.rx_oversize; | |
56923ab6 BC |
6843 | /* record drops from the port level */ |
6844 | cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; | |
fcea6f3d AV |
6845 | } |
6846 | } | |
6847 | ||
6848 | /** | |
6849 | * ice_update_pf_stats - Update PF port stats counters | |
6850 | * @pf: PF whose stats need to be updated | |
6851 | */ | |
5a4a8673 | 6852 | void ice_update_pf_stats(struct ice_pf *pf) |
fcea6f3d AV |
6853 | { |
6854 | struct ice_hw_port_stats *prev_ps, *cur_ps; | |
6855 | struct ice_hw *hw = &pf->hw; | |
4ab95646 | 6856 | u16 fd_ctr_base; |
9e7a5d17 | 6857 | u8 port; |
fcea6f3d | 6858 | |
9e7a5d17 | 6859 | port = hw->port_info->lport; |
fcea6f3d AV |
6860 | prev_ps = &pf->stats_prev; |
6861 | cur_ps = &pf->stats; | |
fcea6f3d | 6862 | |
2fd5e433 BM |
6863 | if (ice_is_reset_in_progress(pf->state)) |
6864 | pf->stat_prev_loaded = false; | |
6865 | ||
9e7a5d17 | 6866 | ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, |
36517fd3 | 6867 | &prev_ps->eth.rx_bytes, |
fcea6f3d AV |
6868 | &cur_ps->eth.rx_bytes); |
6869 | ||
9e7a5d17 | 6870 | ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, |
36517fd3 | 6871 | &prev_ps->eth.rx_unicast, |
fcea6f3d AV |
6872 | &cur_ps->eth.rx_unicast); |
6873 | ||
9e7a5d17 | 6874 | ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, |
36517fd3 | 6875 | &prev_ps->eth.rx_multicast, |
fcea6f3d AV |
6876 | &cur_ps->eth.rx_multicast); |
6877 | ||
9e7a5d17 | 6878 | ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, |
36517fd3 | 6879 | &prev_ps->eth.rx_broadcast, |
fcea6f3d AV |
6880 | &cur_ps->eth.rx_broadcast); |
6881 | ||
56923ab6 BC |
6882 | ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, |
6883 | &prev_ps->eth.rx_discards, | |
6884 | &cur_ps->eth.rx_discards); | |
6885 | ||
9e7a5d17 | 6886 | ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, |
36517fd3 | 6887 | &prev_ps->eth.tx_bytes, |
fcea6f3d AV |
6888 | &cur_ps->eth.tx_bytes); |
6889 | ||
9e7a5d17 | 6890 | ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, |
36517fd3 | 6891 | &prev_ps->eth.tx_unicast, |
fcea6f3d AV |
6892 | &cur_ps->eth.tx_unicast); |
6893 | ||
9e7a5d17 | 6894 | ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, |
36517fd3 | 6895 | &prev_ps->eth.tx_multicast, |
fcea6f3d AV |
6896 | &cur_ps->eth.tx_multicast); |
6897 | ||
9e7a5d17 | 6898 | ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, |
36517fd3 | 6899 | &prev_ps->eth.tx_broadcast, |
fcea6f3d AV |
6900 | &cur_ps->eth.tx_broadcast); |
6901 | ||
9e7a5d17 | 6902 | ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6903 | &prev_ps->tx_dropped_link_down, |
6904 | &cur_ps->tx_dropped_link_down); | |
6905 | ||
9e7a5d17 | 6906 | ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, |
36517fd3 | 6907 | &prev_ps->rx_size_64, &cur_ps->rx_size_64); |
fcea6f3d | 6908 | |
9e7a5d17 | 6909 | ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, |
36517fd3 | 6910 | &prev_ps->rx_size_127, &cur_ps->rx_size_127); |
fcea6f3d | 6911 | |
9e7a5d17 | 6912 | ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, |
36517fd3 | 6913 | &prev_ps->rx_size_255, &cur_ps->rx_size_255); |
fcea6f3d | 6914 | |
9e7a5d17 | 6915 | ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, |
36517fd3 | 6916 | &prev_ps->rx_size_511, &cur_ps->rx_size_511); |
fcea6f3d | 6917 | |
9e7a5d17 | 6918 | ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6919 | &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); |
6920 | ||
9e7a5d17 | 6921 | ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6922 | &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); |
6923 | ||
9e7a5d17 | 6924 | ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6925 | &prev_ps->rx_size_big, &cur_ps->rx_size_big); |
6926 | ||
9e7a5d17 | 6927 | ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, |
36517fd3 | 6928 | &prev_ps->tx_size_64, &cur_ps->tx_size_64); |
fcea6f3d | 6929 | |
9e7a5d17 | 6930 | ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, |
36517fd3 | 6931 | &prev_ps->tx_size_127, &cur_ps->tx_size_127); |
fcea6f3d | 6932 | |
9e7a5d17 | 6933 | ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, |
36517fd3 | 6934 | &prev_ps->tx_size_255, &cur_ps->tx_size_255); |
fcea6f3d | 6935 | |
9e7a5d17 | 6936 | ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, |
36517fd3 | 6937 | &prev_ps->tx_size_511, &cur_ps->tx_size_511); |
fcea6f3d | 6938 | |
9e7a5d17 | 6939 | ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6940 | &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); |
6941 | ||
9e7a5d17 | 6942 | ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6943 | &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); |
6944 | ||
9e7a5d17 | 6945 | ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6946 | &prev_ps->tx_size_big, &cur_ps->tx_size_big); |
6947 | ||
4ab95646 HT |
6948 | fd_ctr_base = hw->fd_ctr_base; |
6949 | ||
6950 | ice_stat_update40(hw, | |
6951 | GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), | |
6952 | pf->stat_prev_loaded, &prev_ps->fd_sb_match, | |
6953 | &cur_ps->fd_sb_match); | |
9e7a5d17 | 6954 | ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6955 | &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); |
6956 | ||
9e7a5d17 | 6957 | ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6958 | &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); |
6959 | ||
9e7a5d17 | 6960 | ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6961 | &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); |
6962 | ||
9e7a5d17 | 6963 | ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6964 | &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); |
6965 | ||
4b0fdceb AV |
6966 | ice_update_dcb_stats(pf); |
6967 | ||
9e7a5d17 | 6968 | ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6969 | &prev_ps->crc_errors, &cur_ps->crc_errors); |
6970 | ||
9e7a5d17 | 6971 | ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6972 | &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); |
6973 | ||
9e7a5d17 | 6974 | ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6975 | &prev_ps->mac_local_faults, |
6976 | &cur_ps->mac_local_faults); | |
6977 | ||
9e7a5d17 | 6978 | ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6979 | &prev_ps->mac_remote_faults, |
6980 | &cur_ps->mac_remote_faults); | |
6981 | ||
9e7a5d17 | 6982 | ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6983 | &prev_ps->rx_undersize, &cur_ps->rx_undersize); |
6984 | ||
9e7a5d17 | 6985 | ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6986 | &prev_ps->rx_fragments, &cur_ps->rx_fragments); |
6987 | ||
9e7a5d17 | 6988 | ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6989 | &prev_ps->rx_oversize, &cur_ps->rx_oversize); |
6990 | ||
9e7a5d17 | 6991 | ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
6992 | &prev_ps->rx_jabber, &cur_ps->rx_jabber); |
6993 | ||
4ab95646 HT |
6994 | cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; |
6995 | ||
fcea6f3d AV |
6996 | pf->stat_prev_loaded = true; |
6997 | } | |
6998 | ||
6999 | /** | |
7000 | * ice_get_stats64 - get statistics for network device structure | |
7001 | * @netdev: network interface device structure | |
7002 | * @stats: main device statistics structure | |
7003 | */ | |
7004 | static | |
7005 | void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |
7006 | { | |
7007 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
7008 | struct rtnl_link_stats64 *vsi_stats; | |
7009 | struct ice_vsi *vsi = np->vsi; | |
7010 | ||
7011 | vsi_stats = &vsi->net_stats; | |
7012 | ||
3d57fd10 | 7013 | if (!vsi->num_txq || !vsi->num_rxq) |
fcea6f3d | 7014 | return; |
3d57fd10 | 7015 | |
fcea6f3d AV |
7016 | /* netdev packet/byte stats come from ring counters. These are obtained | |
7017 | * by summing up ring counters (done by ice_update_vsi_ring_stats). | |
3d57fd10 DE |
7018 | * But only call the update routine and read the registers if VSI is | |
7019 | * not down. | |
fcea6f3d | 7020 | */ |
e97fb1ae | 7021 | if (!test_bit(ICE_VSI_DOWN, vsi->state)) |
3d57fd10 | 7022 | ice_update_vsi_ring_stats(vsi); |
fcea6f3d AV |
7023 | stats->tx_packets = vsi_stats->tx_packets; |
7024 | stats->tx_bytes = vsi_stats->tx_bytes; | |
7025 | stats->rx_packets = vsi_stats->rx_packets; | |
7026 | stats->rx_bytes = vsi_stats->rx_bytes; | |
7027 | ||
7028 | /* The rest of the stats can be read from the hardware but instead we | |
7029 | * just return values that the watchdog task has already obtained from | |
7030 | * the hardware. | |
7031 | */ | |
7032 | stats->multicast = vsi_stats->multicast; | |
7033 | stats->tx_errors = vsi_stats->tx_errors; | |
7034 | stats->tx_dropped = vsi_stats->tx_dropped; | |
7035 | stats->rx_errors = vsi_stats->rx_errors; | |
7036 | stats->rx_dropped = vsi_stats->rx_dropped; | |
7037 | stats->rx_crc_errors = vsi_stats->rx_crc_errors; | |
7038 | stats->rx_length_errors = vsi_stats->rx_length_errors; | |
7039 | } | |
7040 | ||
2b245cb2 AV |
7041 | /** |
7042 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI | |
7043 | * @vsi: VSI having NAPI disabled | |
7044 | */ | |
7045 | static void ice_napi_disable_all(struct ice_vsi *vsi) | |
7046 | { | |
7047 | int q_idx; | |
7048 | ||
7049 | if (!vsi->netdev) | |
7050 | return; | |
7051 | ||
0c2561c8 | 7052 | ice_for_each_q_vector(vsi, q_idx) { |
eec90376 YX |
7053 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
7054 | ||
e72bba21 | 7055 | if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) |
eec90376 | 7056 | napi_disable(&q_vector->napi); |
cdf1f1f1 JK |
7057 | |
7058 | cancel_work_sync(&q_vector->tx.dim.work); | |
7059 | cancel_work_sync(&q_vector->rx.dim.work); | |
eec90376 | 7060 | } |
2b245cb2 AV |
7061 | } |
7062 | ||
cdedef59 AV |
7063 | /** |
7064 | * ice_down - Shutdown the connection | |
7065 | * @vsi: The VSI being stopped | |
21c6e36b JB |
7066 | * |
7067 | * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit | |
cdedef59 | 7068 | */ |
fcea6f3d | 7069 | int ice_down(struct ice_vsi *vsi) |
cdedef59 | 7070 | { |
8ac71327 | 7071 | int i, tx_err, rx_err, vlan_err = 0; |
cdedef59 | 7072 | |
21c6e36b JB |
7073 | WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); |
7074 | ||
b3be918d | 7075 | if (vsi->netdev && vsi->type == ICE_VSI_PF) { |
c31af68a | 7076 | vlan_err = ice_vsi_del_vlan_zero(vsi); |
6b1ff5d3 | 7077 | ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); |
cdedef59 AV |
7078 | netif_carrier_off(vsi->netdev); |
7079 | netif_tx_disable(vsi->netdev); | |
b3be918d GN |
7080 | } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { |
7081 | ice_eswitch_stop_all_tx_queues(vsi->back); | |
cdedef59 AV |
7082 | } |
7083 | ||
7084 | ice_vsi_dis_irq(vsi); | |
03f7a986 AV |
7085 | |
7086 | tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); | |
72adf242 | 7087 | if (tx_err) |
19cce2c6 | 7088 | netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", |
72adf242 | 7089 | vsi->vsi_num, tx_err); |
efc2214b MF |
7090 | if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { |
7091 | tx_err = ice_vsi_stop_xdp_tx_rings(vsi); | |
7092 | if (tx_err) | |
19cce2c6 | 7093 | netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", |
efc2214b MF |
7094 | vsi->vsi_num, tx_err); |
7095 | } | |
72adf242 | 7096 | |
13a6233b | 7097 | rx_err = ice_vsi_stop_all_rx_rings(vsi); |
72adf242 | 7098 | if (rx_err) |
19cce2c6 | 7099 | netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", |
72adf242 AV |
7100 | vsi->vsi_num, rx_err); |
7101 | ||
2b245cb2 | 7102 | ice_napi_disable_all(vsi); |
cdedef59 AV |
7103 | |
7104 | ice_for_each_txq(vsi, i) | |
7105 | ice_clean_tx_ring(vsi->tx_rings[i]); | |
7106 | ||
78c50d69 KM |
7107 | if (ice_is_xdp_ena_vsi(vsi)) |
7108 | ice_for_each_xdp_txq(vsi, i) | |
7109 | ice_clean_tx_ring(vsi->xdp_rings[i]); | |
7110 | ||
cdedef59 AV |
7111 | ice_for_each_rxq(vsi, i) |
7112 | ice_clean_rx_ring(vsi->rx_rings[i]); | |
7113 | ||
8ac71327 | 7114 | if (tx_err || rx_err || vlan_err) { |
19cce2c6 | 7115 | netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", |
cdedef59 | 7116 | vsi->vsi_num, vsi->vsw->sw_id); |
72adf242 AV |
7117 | return -EIO; |
7118 | } | |
7119 | ||
7120 | return 0; | |
cdedef59 AV |
7121 | } |
7122 | ||
dddd406d JB |
7123 | /** |
7124 | * ice_down_up - shutdown the VSI connection and bring it up | |
7125 | * @vsi: the VSI to be reconnected | |
7126 | */ | |
7127 | int ice_down_up(struct ice_vsi *vsi) | |
7128 | { | |
7129 | int ret; | |
7130 | ||
7131 | /* if DOWN already set, nothing to do */ | |
7132 | if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) | |
7133 | return 0; | |
7134 | ||
7135 | ret = ice_down(vsi); | |
7136 | if (ret) | |
7137 | return ret; | |
7138 | ||
7139 | ret = ice_up(vsi); | |
7140 | if (ret) { | |
7141 | netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); | |
7142 | return ret; | |
7143 | } | |
7144 | ||
7145 | return 0; | |
7146 | } | |
7147 | ||
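/* Editorial note: a minimal illustrative sketch (not part of the driver) of
 * the ice_down_up() caller pattern. The helper name is hypothetical; it only
 * assumes what the function above shows: the caller cycles the VSI so that
 * ice_up() re-applies the new configuration, as ice_change_mtu() does below.
 */
#if 0	/* example only, never compiled */
static int ice_example_apply_new_cfg(struct ice_vsi *vsi)
{
	/* tear the VSI down and bring it back up to pick up new settings */
	return ice_down_up(vsi);
}
#endif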
cdedef59 AV |
7148 | /** |
7149 | * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources | |
7150 | * @vsi: VSI having resources allocated | |
7151 | * | |
7152 | * Return 0 on success, negative on failure | |
7153 | */ | |
0e674aeb | 7154 | int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
cdedef59 | 7155 | { |
dab0588f | 7156 | int i, err = 0; |
cdedef59 AV |
7157 | |
7158 | if (!vsi->num_txq) { | |
9a946843 | 7159 | dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", |
cdedef59 AV |
7160 | vsi->vsi_num); |
7161 | return -EINVAL; | |
7162 | } | |
7163 | ||
7164 | ice_for_each_txq(vsi, i) { | |
e72bba21 | 7165 | struct ice_tx_ring *ring = vsi->tx_rings[i]; |
eb0ee8ab MS |
7166 | |
7167 | if (!ring) | |
7168 | return -EINVAL; | |
7169 | ||
1c54c839 GN |
7170 | if (vsi->netdev) |
7171 | ring->netdev = vsi->netdev; | |
eb0ee8ab | 7172 | err = ice_setup_tx_ring(ring); |
cdedef59 AV |
7173 | if (err) |
7174 | break; | |
7175 | } | |
7176 | ||
7177 | return err; | |
7178 | } | |
7179 | ||
7180 | /** | |
7181 | * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources | |
7182 | * @vsi: VSI having resources allocated | |
7183 | * | |
7184 | * Return 0 on success, negative on failure | |
7185 | */ | |
0e674aeb | 7186 | int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
cdedef59 | 7187 | { |
dab0588f | 7188 | int i, err = 0; |
cdedef59 AV |
7189 | |
7190 | if (!vsi->num_rxq) { | |
9a946843 | 7191 | dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", |
cdedef59 AV |
7192 | vsi->vsi_num); |
7193 | return -EINVAL; | |
7194 | } | |
7195 | ||
7196 | ice_for_each_rxq(vsi, i) { | |
e72bba21 | 7197 | struct ice_rx_ring *ring = vsi->rx_rings[i]; |
eb0ee8ab MS |
7198 | |
7199 | if (!ring) | |
7200 | return -EINVAL; | |
7201 | ||
1c54c839 GN |
7202 | if (vsi->netdev) |
7203 | ring->netdev = vsi->netdev; | |
eb0ee8ab | 7204 | err = ice_setup_rx_ring(ring); |
cdedef59 AV |
7205 | if (err) |
7206 | break; | |
7207 | } | |
7208 | ||
7209 | return err; | |
7210 | } | |
7211 | ||
148beb61 HT |
7212 | /** |
7213 | * ice_vsi_open_ctrl - open control VSI for use | |
7214 | * @vsi: the VSI to open | |
7215 | * | |
7216 | * Initialization of the Control VSI | |
7217 | * | |
7218 | * Returns 0 on success, negative value on error | |
7219 | */ | |
7220 | int ice_vsi_open_ctrl(struct ice_vsi *vsi) | |
7221 | { | |
7222 | char int_name[ICE_INT_NAME_STR_LEN]; | |
7223 | struct ice_pf *pf = vsi->back; | |
7224 | struct device *dev; | |
7225 | int err; | |
7226 | ||
7227 | dev = ice_pf_to_dev(pf); | |
7228 | /* allocate descriptors */ | |
7229 | err = ice_vsi_setup_tx_rings(vsi); | |
7230 | if (err) | |
7231 | goto err_setup_tx; | |
7232 | ||
7233 | err = ice_vsi_setup_rx_rings(vsi); | |
7234 | if (err) | |
7235 | goto err_setup_rx; | |
7236 | ||
0db66d20 | 7237 | err = ice_vsi_cfg_lan(vsi); |
148beb61 HT |
7238 | if (err) |
7239 | goto err_setup_rx; | |
7240 | ||
7241 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", | |
7242 | dev_driver_string(dev), dev_name(dev)); | |
7243 | err = ice_vsi_req_irq_msix(vsi, int_name); | |
7244 | if (err) | |
7245 | goto err_setup_rx; | |
7246 | ||
7247 | ice_vsi_cfg_msix(vsi); | |
7248 | ||
7249 | err = ice_vsi_start_all_rx_rings(vsi); | |
7250 | if (err) | |
7251 | goto err_up_complete; | |
7252 | ||
e97fb1ae | 7253 | clear_bit(ICE_VSI_DOWN, vsi->state); |
148beb61 HT |
7254 | ice_vsi_ena_irq(vsi); |
7255 | ||
7256 | return 0; | |
7257 | ||
7258 | err_up_complete: | |
7259 | ice_down(vsi); | |
7260 | err_setup_rx: | |
7261 | ice_vsi_free_rx_rings(vsi); | |
7262 | err_setup_tx: | |
7263 | ice_vsi_free_tx_rings(vsi); | |
7264 | ||
7265 | return err; | |
7266 | } | |
7267 | ||
cdedef59 AV |
7268 | /** |
7269 | * ice_vsi_open - Called when a network interface is made active | |
7270 | * @vsi: the VSI to open | |
7271 | * | |
7272 | * Initialization of the VSI | |
7273 | * | |
7274 | * Returns 0 on success, negative value on error | |
7275 | */ | |
1a1c40df | 7276 | int ice_vsi_open(struct ice_vsi *vsi) |
cdedef59 AV |
7277 | { |
7278 | char int_name[ICE_INT_NAME_STR_LEN]; | |
7279 | struct ice_pf *pf = vsi->back; | |
7280 | int err; | |
7281 | ||
7282 | /* allocate descriptors */ | |
7283 | err = ice_vsi_setup_tx_rings(vsi); | |
7284 | if (err) | |
7285 | goto err_setup_tx; | |
7286 | ||
7287 | err = ice_vsi_setup_rx_rings(vsi); | |
7288 | if (err) | |
7289 | goto err_setup_rx; | |
7290 | ||
0db66d20 | 7291 | err = ice_vsi_cfg_lan(vsi); |
cdedef59 AV |
7292 | if (err) |
7293 | goto err_setup_rx; | |
7294 | ||
7295 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", | |
4015d11e | 7296 | dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); |
ba880734 | 7297 | err = ice_vsi_req_irq_msix(vsi, int_name); |
cdedef59 AV |
7298 | if (err) |
7299 | goto err_setup_rx; | |
7300 | ||
122045ca MS |
7301 | ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); |
7302 | ||
1a1c40df GN |
7303 | if (vsi->type == ICE_VSI_PF) { |
7304 | /* Notify the stack of the actual queue counts. */ | |
7305 | err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); | |
7306 | if (err) | |
7307 | goto err_set_qs; | |
cdedef59 | 7308 | |
1a1c40df GN |
7309 | err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); |
7310 | if (err) | |
7311 | goto err_set_qs; | |
7312 | } | |
cdedef59 AV |
7313 | |
7314 | err = ice_up_complete(vsi); | |
7315 | if (err) | |
7316 | goto err_up_complete; | |
7317 | ||
7318 | return 0; | |
7319 | ||
7320 | err_up_complete: | |
7321 | ice_down(vsi); | |
7322 | err_set_qs: | |
7323 | ice_vsi_free_irq(vsi); | |
7324 | err_setup_rx: | |
7325 | ice_vsi_free_rx_rings(vsi); | |
7326 | err_setup_tx: | |
7327 | ice_vsi_free_tx_rings(vsi); | |
7328 | ||
7329 | return err; | |
7330 | } | |
7331 | ||
0f9d5027 AV |
7332 | /** |
7333 | * ice_vsi_release_all - Delete all VSIs | |
7334 | * @pf: PF from which all VSIs are being removed | |
7335 | */ | |
7336 | static void ice_vsi_release_all(struct ice_pf *pf) | |
7337 | { | |
7338 | int err, i; | |
7339 | ||
7340 | if (!pf->vsi) | |
7341 | return; | |
7342 | ||
80ed404a | 7343 | ice_for_each_vsi(pf, i) { |
0f9d5027 AV |
7344 | if (!pf->vsi[i]) |
7345 | continue; | |
7346 | ||
fbc7b27a KP |
7347 | if (pf->vsi[i]->type == ICE_VSI_CHNL) |
7348 | continue; | |
7349 | ||
0f9d5027 AV |
7350 | err = ice_vsi_release(pf->vsi[i]); |
7351 | if (err) | |
19cce2c6 | 7352 | dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", |
0f9d5027 AV |
7353 | i, err, pf->vsi[i]->vsi_num); |
7354 | } | |
7355 | } | |
7356 | ||
0f9d5027 | 7357 | /** |
462acf6a TN |
7358 | * ice_vsi_rebuild_by_type - Rebuild VSI of a given type |
7359 | * @pf: pointer to the PF instance | |
7360 | * @type: VSI type to rebuild | |
7361 | * | |
7362 | * Iterates through the pf->vsi array and rebuilds VSIs of the requested type | |
0f9d5027 | 7363 | */ |
462acf6a | 7364 | static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) |
0f9d5027 | 7365 | { |
4015d11e | 7366 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a | 7367 | int i, err; |
0f9d5027 | 7368 | |
80ed404a | 7369 | ice_for_each_vsi(pf, i) { |
4425e053 | 7370 | struct ice_vsi *vsi = pf->vsi[i]; |
0f9d5027 | 7371 | |
462acf6a | 7372 | if (!vsi || vsi->type != type) |
0f9d5027 AV |
7373 | continue; |
7374 | ||
462acf6a | 7375 | /* rebuild the VSI */ |
6624e780 | 7376 | err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); |
0f9d5027 | 7377 | if (err) { |
19cce2c6 | 7378 | dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", |
964674f1 | 7379 | err, vsi->idx, ice_vsi_type_str(type)); |
0f9d5027 AV |
7380 | return err; |
7381 | } | |
7382 | ||
462acf6a | 7383 | /* replay filters for the VSI */ |
2ccc1c1c TN |
7384 | err = ice_replay_vsi(&pf->hw, vsi->idx); |
7385 | if (err) { | |
5f87ec48 | 7386 | dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n", |
2ccc1c1c | 7387 | err, vsi->idx, ice_vsi_type_str(type)); |
c1484691 | 7388 | return err; |
462acf6a TN |
7389 | } |
7390 | ||
7391 | /* Re-map HW VSI number, using VSI handle that has been | |
7392 | * previously validated in ice_replay_vsi() call above | |
7393 | */ | |
7394 | vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); | |
7395 | ||
7396 | /* enable the VSI */ | |
7397 | err = ice_ena_vsi(vsi, false); | |
7398 | if (err) { | |
19cce2c6 | 7399 | dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", |
964674f1 | 7400 | err, vsi->idx, ice_vsi_type_str(type)); |
462acf6a TN |
7401 | return err; |
7402 | } | |
7403 | ||
4015d11e BC |
7404 | dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, |
7405 | ice_vsi_type_str(type)); | |
0f9d5027 AV |
7406 | } |
7407 | ||
7408 | return 0; | |
0b28b702 AV |
7409 | } |
7410 | ||
334cb062 | 7411 | /** |
462acf6a TN |
7412 | * ice_update_pf_netdev_link - Update PF netdev link status |
7413 | * @pf: pointer to the PF instance | |
334cb062 | 7414 | */ |
462acf6a | 7415 | static void ice_update_pf_netdev_link(struct ice_pf *pf) |
334cb062 | 7416 | { |
462acf6a | 7417 | bool link_up; |
334cb062 AV |
7418 | int i; |
7419 | ||
80ed404a | 7420 | ice_for_each_vsi(pf, i) { |
4425e053 KK |
7421 | struct ice_vsi *vsi = pf->vsi[i]; |
7422 | ||
462acf6a TN |
7423 | if (!vsi || vsi->type != ICE_VSI_PF) |
7424 | return; | |
334cb062 | 7425 | |
462acf6a TN |
7426 | ice_get_link_status(pf->vsi[i]->port_info, &link_up); |
7427 | if (link_up) { | |
7428 | netif_carrier_on(pf->vsi[i]->netdev); | |
7429 | netif_tx_wake_all_queues(pf->vsi[i]->netdev); | |
7430 | } else { | |
7431 | netif_carrier_off(pf->vsi[i]->netdev); | |
7432 | netif_tx_stop_all_queues(pf->vsi[i]->netdev); | |
334cb062 | 7433 | } |
334cb062 | 7434 | } |
334cb062 AV |
7435 | } |
7436 | ||
0b28b702 AV |
7437 | /** |
7438 | * ice_rebuild - rebuild after reset | |
2f2da36e | 7439 | * @pf: PF to rebuild |
462acf6a | 7440 | * @reset_type: type of reset |
12bb018c BC |
7441 | * |
7442 | * Do not rebuild VF VSIs in this flow because that is already handled via | |
7443 | * ice_reset_all_vfs(). This is because the requirements for resetting a VF | |
7444 | * after a PFR/CORER/GLOBR/etc. differ from the normal flow. Also, we don't | |
7445 | * want to reset/rebuild all the VF VSIs twice. | |
0b28b702 | 7446 | */ |
462acf6a | 7447 | static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) |
0b28b702 | 7448 | { |
4015d11e | 7449 | struct device *dev = ice_pf_to_dev(pf); |
0b28b702 | 7450 | struct ice_hw *hw = &pf->hw; |
a1ffafb0 | 7451 | bool dvm; |
462acf6a | 7452 | int err; |
0b28b702 | 7453 | |
7e408e07 | 7454 | if (test_bit(ICE_DOWN, pf->state)) |
0b28b702 AV |
7455 | goto clear_recovery; |
7456 | ||
462acf6a | 7457 | dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); |
0b28b702 | 7458 | |
b537752e | 7459 | #define ICE_EMP_RESET_SLEEP_MS 5000 |
399e27db JK |
7460 | if (reset_type == ICE_RESET_EMPR) { |
7461 | /* If an EMP reset has occurred, any previously pending flash | |
7462 | * update will have completed. We no longer know whether or | |
7463 | * not the NVM update EMP reset is restricted. | |
7464 | */ | |
7465 | pf->fw_emp_reset_disabled = false; | |
b537752e PO |
7466 | |
7467 | msleep(ICE_EMP_RESET_SLEEP_MS); | |
399e27db JK |
7468 | } |
7469 | ||
2ccc1c1c TN |
7470 | err = ice_init_all_ctrlq(hw); |
7471 | if (err) { | |
7472 | dev_err(dev, "control queues init failed %d\n", err); | |
0f9d5027 | 7473 | goto err_init_ctrlq; |
0b28b702 AV |
7474 | } |
7475 | ||
462acf6a TN |
7476 | /* if DDP was previously loaded successfully */ |
7477 | if (!ice_is_safe_mode(pf)) { | |
7478 | /* reload the SW DB of filter tables */ | |
7479 | if (reset_type == ICE_RESET_PFR) | |
7480 | ice_fill_blk_tbls(hw); | |
7481 | else | |
7482 | /* Reload DDP Package after CORER/GLOBR reset */ | |
7483 | ice_load_pkg(NULL, pf); | |
7484 | } | |
7485 | ||
2ccc1c1c TN |
7486 | err = ice_clear_pf_cfg(hw); |
7487 | if (err) { | |
7488 | dev_err(dev, "clear PF configuration failed %d\n", err); | |
0f9d5027 | 7489 | goto err_init_ctrlq; |
0b28b702 AV |
7490 | } |
7491 | ||
7492 | ice_clear_pxe_mode(hw); | |
7493 | ||
2ccc1c1c TN |
7494 | err = ice_init_nvm(hw); |
7495 | if (err) { | |
7496 | dev_err(dev, "ice_init_nvm failed %d\n", err); | |
97a4ec01 JK |
7497 | goto err_init_ctrlq; |
7498 | } | |
7499 | ||
2ccc1c1c TN |
7500 | err = ice_get_caps(hw); |
7501 | if (err) { | |
7502 | dev_err(dev, "ice_get_caps failed %d\n", err); | |
0f9d5027 | 7503 | goto err_init_ctrlq; |
0b28b702 AV |
7504 | } |
7505 | ||
2ccc1c1c TN |
7506 | err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); |
7507 | if (err) { | |
7508 | dev_err(dev, "set_mac_cfg failed %d\n", err); | |
42449105 AV |
7509 | goto err_init_ctrlq; |
7510 | } | |
7511 | ||
a1ffafb0 BC |
7512 | dvm = ice_is_dvm_ena(hw); |
7513 | ||
7514 | err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); | |
7515 | if (err) | |
7516 | goto err_init_ctrlq; | |
7517 | ||
0f9d5027 AV |
7518 | err = ice_sched_init_port(hw->port_info); |
7519 | if (err) | |
7520 | goto err_sched_init_port; | |
7521 | ||
0b28b702 | 7522 | /* start misc vector */ |
ba880734 BC |
7523 | err = ice_req_irq_msix_misc(pf); |
7524 | if (err) { | |
7525 | dev_err(dev, "misc vector setup failed: %d\n", err); | |
462acf6a | 7526 | goto err_sched_init_port; |
0b28b702 AV |
7527 | } |
7528 | ||
83af0039 HT |
7529 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
7530 | wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); | |
7531 | if (!rd32(hw, PFQF_FD_SIZE)) { | |
7532 | u16 unused, guar, b_effort; | |
7533 | ||
7534 | guar = hw->func_caps.fd_fltr_guar; | |
7535 | b_effort = hw->func_caps.fd_fltr_best_effort; | |
7536 | ||
7537 | /* force guaranteed filter pool for PF */ | |
7538 | ice_alloc_fd_guar_item(hw, &unused, guar); | |
7539 | /* force shared filter pool for PF */ | |
7540 | ice_alloc_fd_shrd_item(hw, &unused, b_effort); | |
7541 | } | |
7542 | } | |
7543 | ||
462acf6a TN |
7544 | if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
7545 | ice_dcb_rebuild(pf); | |
7546 | ||
06c16d89 JK |
7547 | /* If the PF previously had PTP enabled, PTP init needs to happen before | |
7548 | * the VSI rebuild; otherwise the PTP link status events will | |
7549 | * fail. | |
7550 | */ | |
7551 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) | |
48096710 | 7552 | ice_ptp_reset(pf); |
06c16d89 | 7553 | |
43113ff7 KK |
7554 | if (ice_is_feature_supported(pf, ICE_F_GNSS)) |
7555 | ice_gnss_init(pf); | |
7556 | ||
462acf6a TN |
7557 | /* rebuild PF VSI */ |
7558 | err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); | |
0f9d5027 | 7559 | if (err) { |
462acf6a | 7560 | dev_err(dev, "PF VSI rebuild failed: %d\n", err); |
0f9d5027 AV |
7561 | goto err_vsi_rebuild; |
7562 | } | |
0b28b702 | 7563 | |
c9663f79 | 7564 | err = ice_eswitch_rebuild(pf); |
b3be918d | 7565 | if (err) { |
c9663f79 | 7566 | dev_err(dev, "Switchdev rebuild failed: %d\n", err); |
b3be918d GN |
7567 | goto err_vsi_rebuild; |
7568 | } | |
7569 | ||
fbc7b27a KP |
7570 | if (reset_type == ICE_RESET_PFR) { |
7571 | err = ice_rebuild_channels(pf); | |
7572 | if (err) { | |
7573 | dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n", | |
7574 | err); | |
7575 | goto err_vsi_rebuild; | |
7576 | } | |
7577 | } | |
7578 | ||
83af0039 HT |
7579 | /* If Flow Director is active */ |
7580 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { | |
7581 | err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); | |
7582 | if (err) { | |
7583 | dev_err(dev, "control VSI rebuild failed: %d\n", err); | |
7584 | goto err_vsi_rebuild; | |
7585 | } | |
7586 | ||
7587 | /* replay HW Flow Director recipes */ | |
7588 | if (hw->fdir_prof) | |
7589 | ice_fdir_replay_flows(hw); | |
7590 | ||
7591 | /* replay Flow Director filters */ | |
7592 | ice_fdir_replay_fltrs(pf); | |
28bf2672 BC |
7593 | |
7594 | ice_rebuild_arfs(pf); | |
83af0039 HT |
7595 | } |
7596 | ||
462acf6a TN |
7597 | ice_update_pf_netdev_link(pf); |
7598 | ||
7599 | /* tell the firmware we are up */ | |
2ccc1c1c TN |
7600 | err = ice_send_version(pf); |
7601 | if (err) { | |
5f87ec48 | 7602 | dev_err(dev, "Rebuild failed due to error sending driver version: %d\n", |
2ccc1c1c | 7603 | err); |
462acf6a TN |
7604 | goto err_vsi_rebuild; |
7605 | } | |
7606 | ||
7607 | ice_replay_post(hw); | |
7608 | ||
0f9d5027 | 7609 | /* if we get here, reset flow is successful */ |
7e408e07 | 7610 | clear_bit(ICE_RESET_FAILED, pf->state); |
f9f5301e DE |
7611 | |
7612 | ice_plug_aux_dev(pf); | |
3579aa86 DE |
7613 | if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) |
7614 | ice_lag_rebuild(pf); | |
77580179 JK |
7615 | |
7616 | /* Restore timestamp mode settings after VSI rebuild */ | |
7617 | ice_ptp_restore_timestamp_mode(pf); | |
0b28b702 AV |
7618 | return; |
7619 | ||
0f9d5027 | 7620 | err_vsi_rebuild: |
0f9d5027 AV |
7621 | err_sched_init_port: |
7622 | ice_sched_cleanup_all(hw); | |
7623 | err_init_ctrlq: | |
0b28b702 | 7624 | ice_shutdown_all_ctrlq(hw); |
7e408e07 | 7625 | set_bit(ICE_RESET_FAILED, pf->state); |
0b28b702 | 7626 | clear_recovery: |
0f9d5027 | 7627 | /* set this bit in PF state to control service task scheduling */ |
7e408e07 | 7628 | set_bit(ICE_NEEDS_RESTART, pf->state); |
0f9d5027 | 7629 | dev_err(dev, "Rebuild failed, unload and reload driver\n"); |
0b28b702 AV |
7630 | } |
7631 | ||
e94d4478 AV |
7632 | /** |
7633 | * ice_change_mtu - NDO callback to change the MTU | |
7634 | * @netdev: network interface device structure | |
7635 | * @new_mtu: new value for maximum frame size | |
7636 | * | |
7637 | * Returns 0 on success, negative on failure | |
7638 | */ | |
7639 | static int ice_change_mtu(struct net_device *netdev, int new_mtu) | |
7640 | { | |
7641 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
7642 | struct ice_vsi *vsi = np->vsi; | |
7643 | struct ice_pf *pf = vsi->back; | |
2fba7dc5 | 7644 | struct bpf_prog *prog; |
e94d4478 | 7645 | u8 count = 0; |
348048e7 | 7646 | int err = 0; |
e94d4478 | 7647 | |
22bef5e7 | 7648 | if (new_mtu == (int)netdev->mtu) { |
2f2da36e | 7649 | netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); |
e94d4478 AV |
7650 | return 0; |
7651 | } | |
7652 | ||
2fba7dc5 MF |
7653 | prog = vsi->xdp_prog; |
7654 | if (prog && !prog->aux->xdp_has_frags) { | |
23b44513 | 7655 | int frame_size = ice_max_xdp_frame_size(vsi); |
efc2214b MF |
7656 | |
7657 | if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { | |
7658 | netdev_err(netdev, "max MTU for XDP usage is %d\n", | |
23b44513 | 7659 | frame_size - ICE_ETH_PKT_HDR_PAD); |
efc2214b MF |
7660 | return -EINVAL; |
7661 | } | |
c61bcebd MF |
7662 | } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { |
7663 | if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { | |
7664 | netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", | |
7665 | ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); | |
7666 | return -EINVAL; | |
7667 | } | |
efc2214b MF |
7668 | } |
7669 | ||
e94d4478 AV |
7670 | /* if a reset is in progress, wait for some time for it to complete */ |
7671 | do { | |
5df7e45d | 7672 | if (ice_is_reset_in_progress(pf->state)) { |
e94d4478 AV |
7673 | count++; |
7674 | usleep_range(1000, 2000); | |
7675 | } else { | |
7676 | break; | |
7677 | } | |
7678 | ||
7679 | } while (count < 100); | |
7680 | ||
7681 | if (count == 100) { | |
2f2da36e | 7682 | netdev_err(netdev, "can't change MTU. Device is busy\n"); |
e94d4478 AV |
7683 | return -EBUSY; |
7684 | } | |
7685 | ||
22bef5e7 | 7686 | netdev->mtu = (unsigned int)new_mtu; |
b7a03457 MF |
7687 | err = ice_down_up(vsi); |
7688 | if (err) | |
7689 | return err; | |
e94d4478 | 7690 | |
bda5b7db | 7691 | netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); |
97b01291 | 7692 | set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); |
348048e7 DE |
7693 | |
7694 | return err; | |
e94d4478 AV |
7695 | } |
7696 | ||
77a78115 | 7697 | /** |
a7605370 | 7698 | * ice_eth_ioctl - Access the hwtstamp interface |
77a78115 JK |
7699 | * @netdev: network interface device structure |
7700 | * @ifr: interface request data | |
7701 | * @cmd: ioctl command | |
7702 | */ | |
a7605370 | 7703 | static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
77a78115 JK |
7704 | { |
7705 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
7706 | struct ice_pf *pf = np->vsi->back; | |
7707 | ||
7708 | switch (cmd) { | |
7709 | case SIOCGHWTSTAMP: | |
7710 | return ice_ptp_get_ts_config(pf, ifr); | |
7711 | case SIOCSHWTSTAMP: | |
7712 | return ice_ptp_set_ts_config(pf, ifr); | |
7713 | default: | |
7714 | return -EOPNOTSUPP; | |
7715 | } | |
7716 | } | |
7717 | ||
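/* Editorial note: an illustrative userspace sketch of how the SIOCSHWTSTAMP
 * case above is reached. The socket setup and the interface name "eth0" are
 * assumptions; the ioctl and struct hwtstamp_config come from the standard
 * net_tstamp UAPI.
 */
#if 0	/* example only, never compiled */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_enable_hw_timestamping(int sock_fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,	  /* timestamp transmitted packets */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* timestamp all received packets */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
}
#endif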
0fee3577 LY |
7718 | /** |
7719 | * ice_aq_str - convert AQ err code to a string | |
7720 | * @aq_err: the AQ error code to convert | |
7721 | */ | |
7722 | const char *ice_aq_str(enum ice_aq_err aq_err) | |
7723 | { | |
7724 | switch (aq_err) { | |
7725 | case ICE_AQ_RC_OK: | |
7726 | return "OK"; | |
7727 | case ICE_AQ_RC_EPERM: | |
7728 | return "ICE_AQ_RC_EPERM"; | |
7729 | case ICE_AQ_RC_ENOENT: | |
7730 | return "ICE_AQ_RC_ENOENT"; | |
7731 | case ICE_AQ_RC_ENOMEM: | |
7732 | return "ICE_AQ_RC_ENOMEM"; | |
7733 | case ICE_AQ_RC_EBUSY: | |
7734 | return "ICE_AQ_RC_EBUSY"; | |
7735 | case ICE_AQ_RC_EEXIST: | |
7736 | return "ICE_AQ_RC_EEXIST"; | |
7737 | case ICE_AQ_RC_EINVAL: | |
7738 | return "ICE_AQ_RC_EINVAL"; | |
7739 | case ICE_AQ_RC_ENOSPC: | |
7740 | return "ICE_AQ_RC_ENOSPC"; | |
7741 | case ICE_AQ_RC_ENOSYS: | |
7742 | return "ICE_AQ_RC_ENOSYS"; | |
b5e19a64 CC |
7743 | case ICE_AQ_RC_EMODE: |
7744 | return "ICE_AQ_RC_EMODE"; | |
0fee3577 LY |
7745 | case ICE_AQ_RC_ENOSEC: |
7746 | return "ICE_AQ_RC_ENOSEC"; | |
7747 | case ICE_AQ_RC_EBADSIG: | |
7748 | return "ICE_AQ_RC_EBADSIG"; | |
7749 | case ICE_AQ_RC_ESVN: | |
7750 | return "ICE_AQ_RC_ESVN"; | |
7751 | case ICE_AQ_RC_EBADMAN: | |
7752 | return "ICE_AQ_RC_EBADMAN"; | |
7753 | case ICE_AQ_RC_EBADBUF: | |
7754 | return "ICE_AQ_RC_EBADBUF"; | |
7755 | } | |
7756 | ||
7757 | return "ICE_AQ_RC_UNKNOWN"; | |
7758 | } | |
7759 | ||
d76a60ba | 7760 | /** |
b66a972a | 7761 | * ice_set_rss_lut - Set RSS LUT |
d76a60ba | 7762 | * @vsi: Pointer to VSI structure |
d76a60ba AV |
7763 | * @lut: Lookup table |
7764 | * @lut_size: Lookup table size | |
7765 | * | |
7766 | * Returns 0 on success, negative on failure | |
7767 | */ | |
b66a972a | 7768 | int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) |
d76a60ba | 7769 | { |
b66a972a BC |
7770 | struct ice_aq_get_set_rss_lut_params params = {}; |
7771 | struct ice_hw *hw = &vsi->back->hw; | |
5e24d598 | 7772 | int status; |
d76a60ba | 7773 | |
b66a972a BC |
7774 | if (!lut) |
7775 | return -EINVAL; | |
d76a60ba | 7776 | |
b66a972a BC |
7777 | params.vsi_handle = vsi->idx; |
7778 | params.lut_size = lut_size; | |
7779 | params.lut_type = vsi->rss_lut_type; | |
7780 | params.lut = lut; | |
d76a60ba | 7781 | |
b66a972a | 7782 | status = ice_aq_set_rss_lut(hw, ¶ms); |
c1484691 | 7783 | if (status) |
5f87ec48 | 7784 | dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", |
5518ac2a | 7785 | status, ice_aq_str(hw->adminq.sq_last_status)); |
d76a60ba | 7786 | |
c1484691 | 7787 | return status; |
b66a972a | 7788 | } |
e3c53928 | 7789 | |
b66a972a BC |
7790 | /** |
7791 | * ice_set_rss_key - Set RSS key | |
7792 | * @vsi: Pointer to the VSI structure | |
7793 | * @seed: RSS hash seed | |
7794 | * | |
7795 | * Returns 0 on success, negative on failure | |
7796 | */ | |
7797 | int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) | |
7798 | { | |
7799 | struct ice_hw *hw = &vsi->back->hw; | |
5e24d598 | 7800 | int status; |
b66a972a BC |
7801 | |
7802 | if (!seed) | |
7803 | return -EINVAL; | |
7804 | ||
7805 | status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); | |
c1484691 | 7806 | if (status) |
5f87ec48 | 7807 | dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", |
5518ac2a | 7808 | status, ice_aq_str(hw->adminq.sq_last_status)); |
d76a60ba | 7809 | |
c1484691 | 7810 | return status; |
d76a60ba AV |
7811 | } |
7812 | ||
7813 | /** | |
b66a972a | 7814 | * ice_get_rss_lut - Get RSS LUT |
d76a60ba | 7815 | * @vsi: Pointer to VSI structure |
d76a60ba AV |
7816 | * @lut: Buffer to store the lookup table entries |
7817 | * @lut_size: Size of buffer to store the lookup table entries | |
7818 | * | |
7819 | * Returns 0 on success, negative on failure | |
7820 | */ | |
b66a972a | 7821 | int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) |
d76a60ba | 7822 | { |
b66a972a BC |
7823 | struct ice_aq_get_set_rss_lut_params params = {}; |
7824 | struct ice_hw *hw = &vsi->back->hw; | |
5e24d598 | 7825 | int status; |
d76a60ba | 7826 | |
b66a972a BC |
7827 | if (!lut) |
7828 | return -EINVAL; | |
d76a60ba | 7829 | |
b66a972a BC |
7830 | params.vsi_handle = vsi->idx; |
7831 | params.lut_size = lut_size; | |
7832 | params.lut_type = vsi->rss_lut_type; | |
7833 | params.lut = lut; | |
7834 | ||
7835 | status = ice_aq_get_rss_lut(hw, ¶ms); | |
c1484691 | 7836 | if (status) |
5f87ec48 | 7837 | dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", |
5518ac2a | 7838 | status, ice_aq_str(hw->adminq.sq_last_status)); |
d76a60ba | 7839 | |
c1484691 | 7840 | return status; |
b66a972a | 7841 | } |
e3c53928 | 7842 | |
b66a972a BC |
7843 | /** |
7844 | * ice_get_rss_key - Get RSS key | |
7845 | * @vsi: Pointer to VSI structure | |
7846 | * @seed: Buffer to store the key in | |
7847 | * | |
7848 | * Returns 0 on success, negative on failure | |
7849 | */ | |
7850 | int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) | |
7851 | { | |
7852 | struct ice_hw *hw = &vsi->back->hw; | |
5e24d598 | 7853 | int status; |
b66a972a BC |
7854 | |
7855 | if (!seed) | |
7856 | return -EINVAL; | |
7857 | ||
7858 | status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); | |
c1484691 | 7859 | if (status) |
5f87ec48 | 7860 | dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", |
5518ac2a | 7861 | status, ice_aq_str(hw->adminq.sq_last_status)); |
d76a60ba | 7862 | |
c1484691 | 7863 | return status; |
d76a60ba AV |
7864 | } |
7865 | ||
352e9bf2 JG |
7866 | /** |
7867 | * ice_set_rss_hfunc - Set RSS HASH function | |
7868 | * @vsi: Pointer to VSI structure | |
7869 | * @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*) | |
7870 | * | |
7871 | * Returns 0 on success, negative on failure | |
7872 | */ | |
7873 | int ice_set_rss_hfunc(struct ice_vsi *vsi, u8 hfunc) | |
7874 | { | |
7875 | struct ice_hw *hw = &vsi->back->hw; | |
7876 | struct ice_vsi_ctx *ctx; | |
7877 | bool symm; | |
7878 | int err; | |
7879 | ||
7880 | if (hfunc == vsi->rss_hfunc) | |
7881 | return 0; | |
7882 | ||
7883 | if (hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ && | |
7884 | hfunc != ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ) | |
7885 | return -EOPNOTSUPP; | |
7886 | ||
7887 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | |
7888 | if (!ctx) | |
7889 | return -ENOMEM; | |
7890 | ||
7891 | ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); | |
7892 | ctx->info.q_opt_rss = vsi->info.q_opt_rss; | |
7893 | ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; | |
7894 | ctx->info.q_opt_rss |= | |
7895 | FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hfunc); | |
7896 | ctx->info.q_opt_tc = vsi->info.q_opt_tc; | |
7897 | ctx->info.q_opt_flags = vsi->info.q_opt_rss; | |
7898 | ||
7899 | err = ice_update_vsi(hw, vsi->idx, ctx, NULL); | |
7900 | if (err) { | |
7901 | dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", | |
7902 | vsi->vsi_num, err); | |
7903 | } else { | |
7904 | vsi->info.q_opt_rss = ctx->info.q_opt_rss; | |
7905 | vsi->rss_hfunc = hfunc; | |
7906 | netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", | |
7907 | hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ ? | |
7908 | "Symmetric " : ""); | |
7909 | } | |
7910 | kfree(ctx); | |
7911 | if (err) | |
7912 | return err; | |
7913 | ||
7914 | /* Fix the symmetry setting for all existing RSS configurations */ | |
7915 | symm = !!(hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ); | |
7916 | return ice_set_rss_cfg_symm(hw, vsi, symm); | |
7917 | } | |
7918 | ||
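/* Editorial note (illustrative): this path is typically reached via ethtool's
 * RSS configuration, e.g. "ethtool -X <ifname> hfunc <name>", where <name> is
 * one of the hash functions the driver reports through "ethtool -x <ifname>".
 * Symmetric Toeplitz hashes a flow and its reversed flow to the same value,
 * which is why the existing RSS configurations are re-applied with the new
 * symmetry setting above.
 */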
b1edc14a MFIP |
7919 | /** |
7920 | * ice_bridge_getlink - Get the hardware bridge mode | |
7921 | * @skb: skb buff | |
f9867df6 | 7922 | * @pid: process ID |
b1edc14a MFIP |
7923 | * @seq: RTNL message seq |
7924 | * @dev: the netdev being configured | |
7925 | * @filter_mask: filter mask passed in | |
7926 | * @nlflags: netlink flags passed in | |
7927 | * | |
7928 | * Return the bridge mode (VEB/VEPA) | |
7929 | */ | |
7930 | static int | |
7931 | ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |
7932 | struct net_device *dev, u32 filter_mask, int nlflags) | |
7933 | { | |
7934 | struct ice_netdev_priv *np = netdev_priv(dev); | |
7935 | struct ice_vsi *vsi = np->vsi; | |
7936 | struct ice_pf *pf = vsi->back; | |
7937 | u16 bmode; | |
7938 | ||
7939 | bmode = pf->first_sw->bridge_mode; | |
7940 | ||
7941 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, | |
7942 | filter_mask, NULL); | |
7943 | } | |
7944 | ||
7945 | /** | |
7946 | * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) | |
7947 | * @vsi: Pointer to VSI structure | |
7948 | * @bmode: Hardware bridge mode (VEB/VEPA) | |
7949 | * | |
7950 | * Returns 0 on success, negative on failure | |
7951 | */ | |
7952 | static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) | |
7953 | { | |
b1edc14a MFIP |
7954 | struct ice_aqc_vsi_props *vsi_props; |
7955 | struct ice_hw *hw = &vsi->back->hw; | |
198a666a | 7956 | struct ice_vsi_ctx *ctxt; |
2ccc1c1c | 7957 | int ret; |
b1edc14a MFIP |
7958 | |
7959 | vsi_props = &vsi->info; | |
198a666a | 7960 | |
9efe35d0 | 7961 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
198a666a BA |
7962 | if (!ctxt) |
7963 | return -ENOMEM; | |
7964 | ||
7965 | ctxt->info = vsi->info; | |
b1edc14a MFIP |
7966 | |
7967 | if (bmode == BRIDGE_MODE_VEB) | |
7968 | /* change from VEPA to VEB mode */ | |
198a666a | 7969 | ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
b1edc14a MFIP |
7970 | else |
7971 | /* change from VEB to VEPA mode */ | |
198a666a BA |
7972 | ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
7973 | ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); | |
5726ca0e | 7974 | |
2ccc1c1c TN |
7975 | ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
7976 | if (ret) { | |
5f87ec48 | 7977 | dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n", |
2ccc1c1c | 7978 | bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); |
198a666a | 7979 | goto out; |
b1edc14a MFIP |
7980 | } |
7981 | /* Update sw flags for bookkeeping */ | |
198a666a | 7982 | vsi_props->sw_flags = ctxt->info.sw_flags; |
b1edc14a | 7983 | |
198a666a | 7984 | out: |
9efe35d0 | 7985 | kfree(ctxt); |
198a666a | 7986 | return ret; |
b1edc14a MFIP |
7987 | } |
7988 | ||
7989 | /** | |
7990 | * ice_bridge_setlink - Set the hardware bridge mode | |
7991 | * @dev: the netdev being configured | |
7992 | * @nlh: RTNL message | |
7993 | * @flags: bridge setlink flags | |
2fd527b7 | 7994 | * @extack: netlink extended ack |
b1edc14a MFIP |
7995 | * |
7996 | * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is | |
7997 | * hooked up. Iterates through the PF VSI list and sets the loopback mode (if | |
7998 | * not already set) for all VSIs connected to this switch, and also updates the | |
7999 | * unicast switch filter rules for the corresponding switch of the netdev. | |
8000 | */ | |
8001 | static int | |
8002 | ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, | |
3d505147 BA |
8003 | u16 __always_unused flags, |
8004 | struct netlink_ext_ack __always_unused *extack) | |
b1edc14a MFIP |
8005 | { |
8006 | struct ice_netdev_priv *np = netdev_priv(dev); | |
8007 | struct ice_pf *pf = np->vsi->back; | |
8008 | struct nlattr *attr, *br_spec; | |
8009 | struct ice_hw *hw = &pf->hw; | |
b1edc14a MFIP |
8010 | struct ice_sw *pf_sw; |
8011 | int rem, v, err = 0; | |
8012 | ||
8013 | pf_sw = pf->first_sw; | |
8014 | /* find the attribute in the netlink message */ | |
8015 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); | |
06e456a0 RD |
8016 | if (!br_spec) |
8017 | return -EINVAL; | |
b1edc14a MFIP |
8018 | |
8019 | nla_for_each_nested(attr, br_spec, rem) { | |
8020 | __u16 mode; | |
8021 | ||
8022 | if (nla_type(attr) != IFLA_BRIDGE_MODE) | |
8023 | continue; | |
8024 | mode = nla_get_u16(attr); | |
8025 | if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) | |
8026 | return -EINVAL; | |
8027 | /* Continue if bridge mode is not being flipped */ | |
8028 | if (mode == pf_sw->bridge_mode) | |
8029 | continue; | |
8030 | /* Iterates through the PF VSI list and update the loopback | |
8031 | * mode of the VSI | |
8032 | */ | |
8033 | ice_for_each_vsi(pf, v) { | |
8034 | if (!pf->vsi[v]) | |
8035 | continue; | |
8036 | err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); | |
8037 | if (err) | |
8038 | return err; | |
8039 | } | |
8040 | ||
8041 | hw->evb_veb = (mode == BRIDGE_MODE_VEB); | |
8042 | /* Update the unicast switch filter rules for the corresponding | |
8043 | * switch of the netdev | |
8044 | */ | |
2ccc1c1c TN |
8045 | err = ice_update_sw_rule_bridge_mode(hw); |
8046 | if (err) { | |
5f87ec48 | 8047 | netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n", |
2ccc1c1c | 8048 | mode, err, |
0fee3577 | 8049 | ice_aq_str(hw->adminq.sq_last_status)); |
b1edc14a MFIP |
8050 | /* revert hw->evb_veb */ |
8051 | hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); | |
c1484691 | 8052 | return err; |
b1edc14a MFIP |
8053 | } |
8054 | ||
8055 | pf_sw->bridge_mode = mode; | |
8056 | } | |
8057 | ||
8058 | return 0; | |
8059 | } | |
8060 | ||
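/* Editorial note (illustrative, interface name assumed): the mode change
 * handled above is normally requested from userspace with iproute2, e.g.
 *
 *   bridge link set dev eth0 hwmode veb
 *
 * VEB switches unicast between VSIs inside the device, while VEPA forwards
 * all traffic to the external switch.
 */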
b3969fd7 SM |
8061 | /** |
8062 | * ice_tx_timeout - Respond to a Tx Hang | |
8063 | * @netdev: network interface device structure | |
644f40ea | 8064 | * @txqueue: Tx queue |
b3969fd7 | 8065 | */ |
0290bd29 | 8066 | static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
b3969fd7 SM |
8067 | { |
8068 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
e72bba21 | 8069 | struct ice_tx_ring *tx_ring = NULL; |
b3969fd7 SM |
8070 | struct ice_vsi *vsi = np->vsi; |
8071 | struct ice_pf *pf = vsi->back; | |
807bc98d | 8072 | u32 i; |
b3969fd7 SM |
8073 | |
8074 | pf->tx_timeout_count++; | |
8075 | ||
610ed0e9 AJ |
8076 | /* Check if PFC is enabled for the TC to which the queue belongs. | |
8077 | * If so, the Tx timeout is not caused by a hung queue and there is | |
8078 | * no need to reset and rebuild | |
8079 | */ | |
8080 | if (ice_is_pfc_causing_hung_q(pf, txqueue)) { | |
8081 | dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", | |
8082 | txqueue); | |
8083 | return; | |
8084 | } | |
8085 | ||
ed5a3f66 | 8086 | /* now that we have an index, find the tx_ring struct */ |
2faf63b6 | 8087 | ice_for_each_txq(vsi, i) |
ed5a3f66 JF |
8088 | if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) |
8089 | if (txqueue == vsi->tx_rings[i]->q_index) { | |
8090 | tx_ring = vsi->tx_rings[i]; | |
8091 | break; | |
8092 | } | |
b3969fd7 SM |
8093 | |
8094 | /* Reset recovery level if enough time has elapsed after last timeout. | |
8095 | * Also ensure no new reset action happens before next timeout period. | |
8096 | */ | |
8097 | if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) | |
8098 | pf->tx_timeout_recovery_level = 1; | |
8099 | else if (time_before(jiffies, (pf->tx_timeout_last_recovery + | |
8100 | netdev->watchdog_timeo))) | |
8101 | return; | |
8102 | ||
8103 | if (tx_ring) { | |
807bc98d BC |
8104 | struct ice_hw *hw = &pf->hw; |
8105 | u32 head, val = 0; | |
8106 | ||
5a259f8e JB |
8107 | head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, |
8108 | rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); | |
b3969fd7 | 8109 | /* Read interrupt register */ |
ba880734 | 8110 | val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); |
b3969fd7 | 8111 | |
93ff4858 | 8112 | netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", |
ed5a3f66 | 8113 | vsi->vsi_num, txqueue, tx_ring->next_to_clean, |
807bc98d | 8114 | head, tx_ring->next_to_use, val); |
b3969fd7 SM |
8115 | } |
8116 | ||
8117 | pf->tx_timeout_last_recovery = jiffies; | |
93ff4858 | 8118 | netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", |
ed5a3f66 | 8119 | pf->tx_timeout_recovery_level, txqueue); |
b3969fd7 SM |
8120 | |
8121 | switch (pf->tx_timeout_recovery_level) { | |
8122 | case 1: | |
7e408e07 | 8123 | set_bit(ICE_PFR_REQ, pf->state); |
b3969fd7 SM |
8124 | break; |
8125 | case 2: | |
7e408e07 | 8126 | set_bit(ICE_CORER_REQ, pf->state); |
b3969fd7 SM |
8127 | break; |
8128 | case 3: | |
7e408e07 | 8129 | set_bit(ICE_GLOBR_REQ, pf->state); |
b3969fd7 SM |
8130 | break; |
8131 | default: | |
8132 | netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); | |
7e408e07 | 8133 | set_bit(ICE_DOWN, pf->state); |
e97fb1ae | 8134 | set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); |
7e408e07 | 8135 | set_bit(ICE_SERVICE_DIS, pf->state); |
b3969fd7 SM |
8136 | break; |
8137 | } | |
8138 | ||
8139 | ice_service_task_schedule(pf); | |
8140 | pf->tx_timeout_recovery_level++; | |
8141 | } | |
8142 | ||
0d08a441 KP |
8143 | /** |
8144 | * ice_setup_tc_cls_flower - flower classifier offloads | |
8145 | * @np: net device to configure | |
8146 | * @filter_dev: device on which filter is added | |
8147 | * @cls_flower: offload data | |
8148 | */ | |
8149 | static int | |
8150 | ice_setup_tc_cls_flower(struct ice_netdev_priv *np, | |
8151 | struct net_device *filter_dev, | |
8152 | struct flow_cls_offload *cls_flower) | |
8153 | { | |
8154 | struct ice_vsi *vsi = np->vsi; | |
8155 | ||
8156 | if (cls_flower->common.chain_index) | |
8157 | return -EOPNOTSUPP; | |
8158 | ||
8159 | switch (cls_flower->command) { | |
8160 | case FLOW_CLS_REPLACE: | |
8161 | return ice_add_cls_flower(filter_dev, vsi, cls_flower); | |
8162 | case FLOW_CLS_DESTROY: | |
8163 | return ice_del_cls_flower(vsi, cls_flower); | |
8164 | default: | |
8165 | return -EINVAL; | |
8166 | } | |
8167 | } | |
8168 | ||
8169 | /** | |
8170 | * ice_setup_tc_block_cb - callback handler registered for TC block | |
8171 | * @type: TC SETUP type | |
8172 | * @type_data: TC flower offload data that contains user input | |
8173 | * @cb_priv: netdev private data | |
8174 | */ | |
8175 | static int | |
8176 | ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) | |
8177 | { | |
8178 | struct ice_netdev_priv *np = cb_priv; | |
8179 | ||
8180 | switch (type) { | |
8181 | case TC_SETUP_CLSFLOWER: | |
8182 | return ice_setup_tc_cls_flower(np, np->vsi->netdev, | |
8183 | type_data); | |
8184 | default: | |
8185 | return -EOPNOTSUPP; | |
8186 | } | |
8187 | } | |
8188 | ||
fbc7b27a KP |
8189 | /** |
8190 | * ice_validate_mqprio_qopt - Validate MQPRIO input parameters | |
8191 | * @vsi: Pointer to VSI | |
8192 | * @mqprio_qopt: input parameters for mqprio queue configuration | |
8193 | * | |
8194 | * This function validates the MQPRIO params, such as qcount (power of 2 | |
8195 | * wherever needed), and makes sure the user doesn't specify a qcount or BW | |
8196 | * rate limit for more TCs than "num_tc" | |
8197 | */ | |
8198 | static int | |
8199 | ice_validate_mqprio_qopt(struct ice_vsi *vsi, | |
8200 | struct tc_mqprio_qopt_offload *mqprio_qopt) | |
8201 | { | |
fbc7b27a KP |
8202 | int non_power_of_2_qcount = 0; |
8203 | struct ice_pf *pf = vsi->back; | |
8204 | int max_rss_q_cnt = 0; | |
5f16da6e | 8205 | u64 sum_min_rate = 0; |
fbc7b27a KP |
8206 | struct device *dev; |
8207 | int i, speed; | |
8208 | u8 num_tc; | |
8209 | ||
8210 | if (vsi->type != ICE_VSI_PF) | |
8211 | return -EINVAL; | |
8212 | ||
8213 | if (mqprio_qopt->qopt.offset[0] != 0 || | |
8214 | mqprio_qopt->qopt.num_tc < 1 || | |
8215 | mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) | |
8216 | return -EINVAL; | |
8217 | ||
8218 | dev = ice_pf_to_dev(pf); | |
8219 | vsi->ch_rss_size = 0; | |
8220 | num_tc = mqprio_qopt->qopt.num_tc; | |
5f16da6e | 8221 | speed = ice_get_link_speed_kbps(vsi); |
fbc7b27a KP |
8222 | |
8223 | for (i = 0; num_tc; i++) { | |
8224 | int qcount = mqprio_qopt->qopt.count[i]; | |
8225 | u64 max_rate, min_rate, rem; | |
8226 | ||
8227 | if (!qcount) | |
8228 | return -EINVAL; | |
8229 | ||
8230 | if (is_power_of_2(qcount)) { | |
8231 | if (non_power_of_2_qcount && | |
8232 | qcount > non_power_of_2_qcount) { | |
8233 | dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n", | |
8234 | qcount, non_power_of_2_qcount); | |
8235 | return -EINVAL; | |
8236 | } | |
8237 | if (qcount > max_rss_q_cnt) | |
8238 | max_rss_q_cnt = qcount; | |
8239 | } else { | |
8240 | if (non_power_of_2_qcount && | |
8241 | qcount != non_power_of_2_qcount) { | |
8242 | dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n", | |
8243 | qcount, non_power_of_2_qcount); | |
8244 | return -EINVAL; | |
8245 | } | |
8246 | if (qcount < max_rss_q_cnt) { | |
8247 | dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n", | |
8248 | qcount, max_rss_q_cnt); | |
8249 | return -EINVAL; | |
8250 | } | |
8251 | max_rss_q_cnt = qcount; | |
8252 | non_power_of_2_qcount = qcount; | |
8253 | } | |
8254 | ||
8255 | /* TC command takes input in K/M/Gbps or K/M/Gbit etc but | |
8256 | * converts the bandwidth rate limit into Bytes/s when | |
8257 | * passing it down to the driver. So convert input bandwidth | |
8258 | * from Bytes/s to Kbps | |
8259 | */ | |
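/* Editorial worked example (assuming ICE_BW_KBPS_DIVISOR is 125): a
 * 1 Gbit/s limit arrives from the stack as 125000000 Bytes/s, and
 * dividing by 125 yields 1000000 Kbps, i.e. 1 Gbit/s again.
 */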
8260 | max_rate = mqprio_qopt->max_rate[i]; | |
8261 | max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); | |
fbc7b27a KP |
8262 | |
8263 | /* min_rate is minimum guaranteed rate and it can't be zero */ | |
8264 | min_rate = mqprio_qopt->min_rate[i]; | |
8265 | min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR); | |
8266 | sum_min_rate += min_rate; | |
8267 | ||
8268 | if (min_rate && min_rate < ICE_MIN_BW_LIMIT) { | |
8269 | dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i, | |
8270 | min_rate, ICE_MIN_BW_LIMIT); | |
8271 | return -EINVAL; | |
8272 | } | |
8273 | ||
5f16da6e SS |
8274 | if (max_rate && max_rate > speed) { |
8275 | dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n", | |
8276 | i, max_rate, speed); | |
8277 | return -EINVAL; | |
8278 | } | |
8279 | ||
fbc7b27a KP |
8280 | iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); |
8281 | if (rem) { | |
8282 | dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", | |
8283 | i, ICE_MIN_BW_LIMIT); | |
8284 | return -EINVAL; | |
8285 | } | |
8286 | ||
8287 | iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem); | |
8288 | if (rem) { | |
8289 | dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps", | |
8290 | i, ICE_MIN_BW_LIMIT); | |
8291 | return -EINVAL; | |
8292 | } | |
8293 | ||
8294 | /* min_rate can't be more than max_rate, except when max_rate | |
8295 | * is zero (implies max_rate sought is max line rate). In such | |
8296 | * a case min_rate can be more than max. | |
8297 | */ | |
8298 | if (max_rate && min_rate > max_rate) { | |
8299 | dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n", | |
8300 | min_rate, max_rate); | |
8301 | return -EINVAL; | |
8302 | } | |
8303 | ||
8304 | if (i >= mqprio_qopt->qopt.num_tc - 1) | |
8305 | break; | |
8306 | if (mqprio_qopt->qopt.offset[i + 1] != | |
8307 | (mqprio_qopt->qopt.offset[i] + qcount)) | |
8308 | return -EINVAL; | |
8309 | } | |
8310 | if (vsi->num_rxq < | |
8311 | (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) | |
8312 | return -EINVAL; | |
8313 | if (vsi->num_txq < | |
8314 | (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) | |
8315 | return -EINVAL; | |
8316 | ||
fbc7b27a KP |
8317 | if (sum_min_rate && sum_min_rate > (u64)speed) { |
8318 | dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", | |
8319 | sum_min_rate, speed); | |
8320 | return -EINVAL; | |
8321 | } | |
8322 | ||
8323 | /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ | |
8324 | vsi->ch_rss_size = max_rss_q_cnt; | |
8325 | ||
8326 | return 0; | |
8327 | } | |
8328 | ||
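/* Editorial note: an illustrative configuration that passes the checks
 * above (interface name and rates are assumptions, following the ADQ
 * mqprio syntax):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 \
 *      queues 4@0 8@4 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * Both qcounts are powers of two, the offsets are contiguous (4@0 then
 * 8@4), and each max_rate is a multiple of ICE_MIN_BW_LIMIT and below
 * link speed, so vsi->ch_rss_size ends up as 8.
 */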
40319796 KP |
8329 | /** |
8330 | * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF | |
8331 | * @pf: ptr to PF device | |
8332 | * @vsi: ptr to VSI | |
8333 | */ | |
8334 | static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi) | |
8335 | { | |
8336 | struct device *dev = ice_pf_to_dev(pf); | |
8337 | bool added = false; | |
8338 | struct ice_hw *hw; | |
8339 | int flow; | |
8340 | ||
8341 | if (!(vsi->num_gfltr || vsi->num_bfltr)) | |
8342 | return -EINVAL; | |
8343 | ||
8344 | hw = &pf->hw; | |
8345 | for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) { | |
8346 | struct ice_fd_hw_prof *prof; | |
8347 | int tun, status; | |
8348 | u64 entry_h; | |
8349 | ||
8350 | if (!(hw->fdir_prof && hw->fdir_prof[flow] && | |
8351 | hw->fdir_prof[flow]->cnt)) | |
8352 | continue; | |
8353 | ||
8354 | for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) { | |
8355 | enum ice_flow_priority prio; | |
40319796 KP |
8356 | |
8357 | /* add this VSI to FDir profile for this flow */ | |
8358 | prio = ICE_FLOW_PRIO_NORMAL; | |
8359 | prof = hw->fdir_prof[flow]; | |
b1f5921a AZ |
8360 | status = ice_flow_add_entry(hw, ICE_BLK_FD, |
8361 | prof->prof_id[tun], | |
40319796 KP |
8362 | prof->vsi_h[0], vsi->idx, |
8363 | prio, prof->fdir_seg[tun], | |
8364 | &entry_h); | |
8365 | if (status) { | |
8366 | dev_err(dev, "channel VSI idx %d, not able to add to group %d\n", | |
8367 | vsi->idx, flow); | |
8368 | continue; | |
8369 | } | |
8370 | ||
8371 | prof->entry_h[prof->cnt][tun] = entry_h; | |
8372 | } | |
8373 | ||
8374 | /* store VSI for filter replay and delete */ | |
8375 | prof->vsi_h[prof->cnt] = vsi->idx; | |
8376 | prof->cnt++; | |
8377 | ||
8378 | added = true; | |
8379 | dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, | |
8380 | flow); | |
8381 | } | |
8382 | ||
8383 | if (!added) | |
8384 | dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); | |
8385 | ||
8386 | return 0; | |
8387 | } | |
8388 | ||
fbc7b27a KP |
8389 | /** |
8390 | * ice_add_channel - add a channel by adding VSI | |
8391 | * @pf: ptr to PF device | |
8392 | * @sw_id: underlying HW switching element ID | |
8393 | * @ch: ptr to channel structure | |
8394 | * | |
8395 | * Add a channel (VSI) using add_vsi and queue_map | |
8396 | */ | |
8397 | static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch) | |
8398 | { | |
8399 | struct device *dev = ice_pf_to_dev(pf); | |
8400 | struct ice_vsi *vsi; | |
8401 | ||
8402 | if (ch->type != ICE_VSI_CHNL) { | |
8403 | dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); | |
8404 | return -EINVAL; | |
8405 | } | |
8406 | ||
8407 | vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); | |
8408 | if (!vsi || vsi->type != ICE_VSI_CHNL) { | |
8409 | dev_err(dev, "create chnl VSI failure\n"); | |
8410 | return -EINVAL; | |
8411 | } | |
8412 | ||
40319796 KP |
8413 | ice_add_vsi_to_fdir(pf, vsi); |
8414 | ||
fbc7b27a KP |
8415 | ch->sw_id = sw_id; |
8416 | ch->vsi_num = vsi->vsi_num; | |
8417 | ch->info.mapping_flags = vsi->info.mapping_flags; | |
8418 | ch->ch_vsi = vsi; | |
8419 | /* set the back pointer of channel for newly created VSI */ | |
8420 | vsi->ch = ch; | |
8421 | ||
8422 | memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, | |
8423 | sizeof(vsi->info.q_mapping)); | |
8424 | memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, | |
8425 | sizeof(vsi->info.tc_mapping)); | |
8426 | ||
8427 | return 0; | |
8428 | } | |
8429 | ||
8430 | /** | |
8431 | * ice_chnl_cfg_res - configure a channel's ring and vector resources | |
8432 | * @vsi: the VSI being setup | |
8433 | * @ch: ptr to channel structure | |
8434 | * | |
8435 | * Configure channel-specific resources such as rings and vectors. | |
8436 | */ | |
8437 | static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch) | |
8438 | { | |
8439 | int i; | |
8440 | ||
8441 | for (i = 0; i < ch->num_txq; i++) { | |
8442 | struct ice_q_vector *tx_q_vector, *rx_q_vector; | |
8443 | struct ice_ring_container *rc; | |
8444 | struct ice_tx_ring *tx_ring; | |
8445 | struct ice_rx_ring *rx_ring; | |
8446 | ||
8447 | tx_ring = vsi->tx_rings[ch->base_q + i]; | |
8448 | rx_ring = vsi->rx_rings[ch->base_q + i]; | |
8449 | if (!tx_ring || !rx_ring) | |
8450 | continue; | |
8451 | ||
8452 | /* mark the rings as channel-enabled */ | |
8453 | tx_ring->ch = ch; | |
8454 | rx_ring->ch = ch; | |
8455 | ||
8456 | /* following code block sets up vector specific attributes */ | |
8457 | tx_q_vector = tx_ring->q_vector; | |
8458 | rx_q_vector = rx_ring->q_vector; | |
8459 | if (!tx_q_vector && !rx_q_vector) | |
8460 | continue; | |
8461 | ||
8462 | if (tx_q_vector) { | |
8463 | tx_q_vector->ch = ch; | |
8464 | /* setup Tx and Rx ITR setting if DIM is off */ | |
8465 | rc = &tx_q_vector->tx; | |
8466 | if (!ITR_IS_DYNAMIC(rc)) | |
8467 | ice_write_itr(rc, rc->itr_setting); | |
8468 | } | |
8469 | if (rx_q_vector) { | |
8470 | rx_q_vector->ch = ch; | |
8471 | /* setup Tx and Rx ITR setting if DIM is off */ | |
8472 | rc = &rx_q_vector->rx; | |
8473 | if (!ITR_IS_DYNAMIC(rc)) | |
8474 | ice_write_itr(rc, rc->itr_setting); | |
8475 | } | |
8476 | } | |
8477 | ||
8478 | /* it is safe to assume that, if the channel has a non-zero num_txq or | |
8479 | * num_rxq, the GLINT_ITR register will have been written to perform an | |
8480 | * in-context update, hence perform a flush | |
8481 | */ | |
8482 | if (ch->num_txq || ch->num_rxq) | |
8483 | ice_flush(&vsi->back->hw); | |
8484 | } | |
8485 | ||
8486 | /** | |
8487 | * ice_cfg_chnl_all_res - configure channel resources | |
8488 | * @vsi: ptr to main VSI | |
8489 | * @ch: ptr to channel structure | |
8490 | * | |
8491 | * This function configures channel specific resources such as flow-director | |
8492 | * counter index, and other resources such as queues, vectors, ITR settings | |
8493 | */ | |
8494 | static void | |
8495 | ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch) | |
8496 | { | |
8497 | /* configure channel (aka ADQ) resources such as queues, vectors, | |
8498 | * ITR settings for channel specific vectors and anything else | |
8499 | */ | |
8500 | ice_chnl_cfg_res(vsi, ch); | |
8501 | } | |
8502 | ||
8503 | /** | |
8504 | * ice_setup_hw_channel - setup new channel | |
8505 | * @pf: ptr to PF device | |
8506 | * @vsi: the VSI being setup | |
8507 | * @ch: ptr to channel structure | |
8508 | * @sw_id: underlying HW switching element ID | |
8509 | * @type: type of channel to be created (VMDq2/VF) | |
8510 | * | |
8511 | * Setup new channel (VSI) based on specified type (VMDq2/VF) | |
8512 | * and configures Tx rings accordingly | |
8513 | */ | |
8514 | static int | |
8515 | ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi, | |
8516 | struct ice_channel *ch, u16 sw_id, u8 type) | |
8517 | { | |
8518 | struct device *dev = ice_pf_to_dev(pf); | |
8519 | int ret; | |
8520 | ||
8521 | ch->base_q = vsi->next_base_q; | |
8522 | ch->type = type; | |
8523 | ||
8524 | ret = ice_add_channel(pf, sw_id, ch); | |
8525 | if (ret) { | |
8526 | dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id); | |
8527 | return ret; | |
8528 | } | |
8529 | ||
8530 | /* configure/setup ADQ specific resources */ | |
8531 | ice_cfg_chnl_all_res(vsi, ch); | |
8532 | ||
8533 | /* make sure to update the next_base_q so that subsequent channel's | |
8534 | * (aka ADQ) VSI queue map is correct | |
8535 | */ | |
8536 | vsi->next_base_q = vsi->next_base_q + ch->num_rxq; | |
8537 | dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, | |
8538 | ch->num_rxq); | |
8539 | ||
8540 | return 0; | |
8541 | } | |
8542 | ||
8543 | /** | |
8544 | * ice_setup_channel - setup new channel using uplink element | |
8545 | * @pf: ptr to PF device | |
8546 | * @vsi: the VSI being setup | |
8547 | * @ch: ptr to channel structure | |
8548 | * | |
8549 | * Setup new channel (VSI) based on specified type (VMDq2/VF) | |
8550 | * and uplink switching element | |
8551 | */ | |
8552 | static bool | |
8553 | ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi, | |
8554 | struct ice_channel *ch) | |
8555 | { | |
8556 | struct device *dev = ice_pf_to_dev(pf); | |
8557 | u16 sw_id; | |
8558 | int ret; | |
8559 | ||
8560 | if (vsi->type != ICE_VSI_PF) { | |
8561 | dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); | |
8562 | return false; | |
8563 | } | |
8564 | ||
8565 | sw_id = pf->first_sw->sw_id; | |
8566 | ||
8567 | /* create channel (VSI) */ | |
8568 | ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL); | |
8569 | if (ret) { | |
8570 | dev_err(dev, "failed to setup hw_channel\n"); | |
8571 | return false; | |
8572 | } | |
8573 | dev_dbg(dev, "successfully created channel()\n"); | |
8574 | ||
8575 | return ch->ch_vsi ? true : false; | |
8576 | } | |
8577 | ||
8578 | /** | |
8579 | * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate | |
8580 | * @vsi: VSI to be configured | |
8581 | * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit | |
8582 | * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit | |
8583 | */ | |
8584 | static int | |
8585 | ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate) | |
8586 | { | |
8587 | int err; | |
8588 | ||
8589 | err = ice_set_min_bw_limit(vsi, min_tx_rate); | |
8590 | if (err) | |
8591 | return err; | |
8592 | ||
8593 | return ice_set_max_bw_limit(vsi, max_tx_rate); | |
8594 | } | |
8595 | ||
8596 | /** | |
8597 | * ice_create_q_channel - function to create channel | |
8598 | * @vsi: VSI to be configured | |
8599 | * @ch: ptr to channel (it contains channel specific params) | |
8600 | * | |
8601 | * This function creates a channel (VSI) using num_queues specified by the | |
8602 | * user and reconfigures RSS if needed. | |
8603 | */ | |
8604 | static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch) | |
8605 | { | |
8606 | struct ice_pf *pf = vsi->back; | |
8607 | struct device *dev; | |
8608 | ||
8609 | if (!ch) | |
8610 | return -EINVAL; | |
8611 | ||
8612 | dev = ice_pf_to_dev(pf); | |
8613 | if (!ch->num_txq || !ch->num_rxq) { | |
8614 | dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); | |
8615 | return -EINVAL; | |
8616 | } | |
8617 | ||
8618 | if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { | |
8619 | dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n", | |
8620 | vsi->cnt_q_avail, ch->num_txq); | |
8621 | return -EINVAL; | |
8622 | } | |
8623 | ||
8624 | if (!ice_setup_channel(pf, vsi, ch)) { | |
8625 | dev_info(dev, "Failed to setup channel\n"); | |
8626 | return -EINVAL; | |
8627 | } | |
8628 | /* configure BW rate limit */ | |
8629 | if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { | |
8630 | int ret; | |
8631 | ||
8632 | ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, | |
8633 | ch->min_tx_rate); | |
8634 | if (ret) | |
8635 | dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n", | |
8636 | ch->max_tx_rate, ch->ch_vsi->vsi_num); | |
8637 | else | |
8638 | dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n", | |
8639 | ch->max_tx_rate, ch->ch_vsi->vsi_num); | |
8640 | } | |
8641 | ||
8642 | vsi->cnt_q_avail -= ch->num_txq; | |
8643 | ||
8644 | return 0; | |
8645 | } | |
8646 | ||
9fea7498 KP |
8647 | /** |
8648 | * ice_rem_all_chnl_fltrs - removes all channel filters | |
8649 | * @pf: ptr to PF; TC-flower based filters are tracked at the PF level | |
8650 | * | |
8651 | * Remove all advanced switch filters, but only those that are | |
8652 | * channel-specific tc-flower based filters | |
8653 | */ | |
8654 | static void ice_rem_all_chnl_fltrs(struct ice_pf *pf) | |
8655 | { | |
8656 | struct ice_tc_flower_fltr *fltr; | |
8657 | struct hlist_node *node; | |
8658 | ||
8659 | /* to remove all channel filters, iterate an ordered list of filters */ | |
8660 | hlist_for_each_entry_safe(fltr, node, | |
8661 | &pf->tc_flower_fltr_list, | |
8662 | tc_flower_node) { | |
8663 | struct ice_rule_query_data rule; | |
8664 | int status; | |
8665 | ||
8666 | /* for now process only channel specific filters */ | |
8667 | if (!ice_is_chnl_fltr(fltr)) | |
8668 | continue; | |
8669 | ||
8670 | rule.rid = fltr->rid; | |
8671 | rule.rule_id = fltr->rule_id; | |
143b86f3 | 8672 | rule.vsi_handle = fltr->dest_vsi_handle; |
9fea7498 KP |
8673 | status = ice_rem_adv_rule_by_id(&pf->hw, &rule); |
8674 | if (status) { | |
8675 | if (status == -ENOENT) | |
8676 | dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n", | |
8677 | rule.rule_id); | |
8678 | else | |
8679 | dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n", | |
8680 | status); | |
8681 | } else if (fltr->dest_vsi) { | |
8682 | /* update advanced switch filter count */ | |
8683 | if (fltr->dest_vsi->type == ICE_VSI_CHNL) { | |
8684 | u32 flags = fltr->flags; | |
8685 | ||
8686 | fltr->dest_vsi->num_chnl_fltr--; | |
8687 | if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | | |
8688 | ICE_TC_FLWR_FIELD_ENC_DST_MAC)) | |
8689 | pf->num_dmac_chnl_fltrs--; | |
8690 | } | |
8691 | } | |
8692 | ||
8693 | hlist_del(&fltr->tc_flower_node); | |
8694 | kfree(fltr); | |
8695 | } | |
8696 | } | |
8697 | ||
fbc7b27a KP |
8698 | /** |
8699 | * ice_remove_q_channels - Remove queue channels for the TCs | |
8700 | * @vsi: VSI to be configured | |
8701 | * @rem_fltr: whether to delete advanced switch filters | |
8702 | * | |
8703 | * Remove queue channels for the TCs | |
8704 | */ | |
9fea7498 | 8705 | static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr) |
fbc7b27a KP |
8706 | { |
8707 | struct ice_channel *ch, *ch_tmp; | |
9fea7498 | 8708 | struct ice_pf *pf = vsi->back; |
fbc7b27a KP |
8709 | int i; |
8710 | ||
9fea7498 KP |
8711 | /* remove all tc-flower based filters if they are channel filters only */ | |
8712 | if (rem_fltr) | |
8713 | ice_rem_all_chnl_fltrs(pf); | |
8714 | ||
40319796 KP |
8715 | /* remove ntuple filters since queue configuration is being changed */ |
8716 | if (vsi->netdev->features & NETIF_F_NTUPLE) { | |
8717 | struct ice_hw *hw = &pf->hw; | |
8718 | ||
8719 | mutex_lock(&hw->fdir_fltr_lock); | |
8720 | ice_fdir_del_all_fltrs(vsi); | |
8721 | mutex_unlock(&hw->fdir_fltr_lock); | |
8722 | } | |
8723 | ||
fbc7b27a KP |
8724 | /* perform cleanup for channels if they exist */ |
8725 | list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { | |
8726 | struct ice_vsi *ch_vsi; | |
8727 | ||
8728 | list_del(&ch->list); | |
8729 | ch_vsi = ch->ch_vsi; | |
8730 | if (!ch_vsi) { | |
8731 | kfree(ch); | |
8732 | continue; | |
8733 | } | |
8734 | ||
8735 | /* Reset queue contexts */ | |
8736 | for (i = 0; i < ch->num_rxq; i++) { | |
8737 | struct ice_tx_ring *tx_ring; | |
8738 | struct ice_rx_ring *rx_ring; | |
8739 | ||
8740 | tx_ring = vsi->tx_rings[ch->base_q + i]; | |
8741 | rx_ring = vsi->rx_rings[ch->base_q + i]; | |
8742 | if (tx_ring) { | |
8743 | tx_ring->ch = NULL; | |
8744 | if (tx_ring->q_vector) | |
8745 | tx_ring->q_vector->ch = NULL; | |
8746 | } | |
8747 | if (rx_ring) { | |
8748 | rx_ring->ch = NULL; | |
8749 | if (rx_ring->q_vector) | |
8750 | rx_ring->q_vector->ch = NULL; | |
8751 | } | |
8752 | } | |
8753 | ||
40319796 KP |
8754 | /* Release FD resources for the channel VSI */ |
8755 | ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); | |
8756 | ||
fbc7b27a KP |
8757 | /* clear the VSI from scheduler tree */ |
8758 | ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); | |
8759 | ||
227bf450 | 8760 | /* Delete VSI from FW, PF and HW VSI arrays */ |
fbc7b27a KP |
8761 | ice_vsi_delete(ch->ch_vsi); |
8762 | ||
fbc7b27a KP |
8763 | /* free the channel */ |
8764 | kfree(ch); | |
8765 | } | |
8766 | ||
8767 | /* clear the channel VSI map which is stored in main VSI */ | |
8768 | ice_for_each_chnl_tc(i) | |
8769 | vsi->tc_map_vsi[i] = NULL; | |
8770 | ||
8771 | /* reset main VSI's all TC information */ | |
8772 | vsi->all_enatc = 0; | |
8773 | vsi->all_numtc = 0; | |
8774 | } | |
8775 | ||
8776 | /** | |
8777 | * ice_rebuild_channels - rebuild channels | |
8778 | * @pf: ptr to PF | |
8779 | * | |
8780 | * Recreate channel VSIs and replay filters | |
8781 | */ | |
8782 | static int ice_rebuild_channels(struct ice_pf *pf) | |
8783 | { | |
8784 | struct device *dev = ice_pf_to_dev(pf); | |
8785 | struct ice_vsi *main_vsi; | |
8786 | bool rem_adv_fltr = true; | |
8787 | struct ice_channel *ch; | |
8788 | struct ice_vsi *vsi; | |
8789 | int tc_idx = 1; | |
8790 | int i, err; | |
8791 | ||
8792 | main_vsi = ice_get_main_vsi(pf); | |
8793 | if (!main_vsi) | |
8794 | return 0; | |
8795 | ||
8796 | if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || | |
8797 | main_vsi->old_numtc == 1) | |
8798 | return 0; /* nothing to be done */ | |
8799 | ||
8800 | /* reconfigure the main VSI based on the old TC value and the cached | |
8801 | * MQPRIO opts | |
8802 | */ | |
8803 | err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); | |
8804 | if (err) { | |
8805 | dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n", | |
8806 | main_vsi->old_ena_tc, main_vsi->vsi_num); | |
8807 | return err; | |
8808 | } | |
8809 | ||
8810 | /* rebuild ADQ VSIs */ | |
8811 | ice_for_each_vsi(pf, i) { | |
8812 | enum ice_vsi_type type; | |
8813 | ||
8814 | vsi = pf->vsi[i]; | |
8815 | if (!vsi || vsi->type != ICE_VSI_CHNL) | |
8816 | continue; | |
8817 | ||
8818 | type = vsi->type; | |
8819 | ||
8820 | /* rebuild ADQ VSI */ | |
6624e780 | 8821 | err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT); |
fbc7b27a KP |
8822 | if (err) { |
8823 | dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n", | |
8824 | ice_vsi_type_str(type), vsi->idx, err); | |
8825 | goto cleanup; | |
8826 | } | |
8827 | ||
8828 | /* Re-map HW VSI number, using the VSI handle that was | |
8829 | * validated in the ice_vsi_rebuild() call above | |
8830 | */ | |
8831 | vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); | |
8832 | ||
8833 | /* replay filters for the VSI */ | |
8834 | err = ice_replay_vsi(&pf->hw, vsi->idx); | |
8835 | if (err) { | |
8836 | dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n", | |
8837 | ice_vsi_type_str(type), err, vsi->idx); | |
8838 | rem_adv_fltr = false; | |
8839 | goto cleanup; | |
8840 | } | |
8841 | dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n", | |
8842 | ice_vsi_type_str(type), vsi->idx); | |
8843 | ||
8844 | /* store ADQ VSI at correct TC index in main VSI's | |
8845 | * map of TC to VSI | |
8846 | */ | |
8847 | main_vsi->tc_map_vsi[tc_idx++] = vsi; | |
8848 | } | |
8849 | ||
8850 | /* ADQ VSI(s) have been rebuilt successfully, so set up | |
8851 | * the channels for the main VSI's Tx and Rx rings | |
8852 | */ | |
8853 | list_for_each_entry(ch, &main_vsi->ch_list, list) { | |
8854 | struct ice_vsi *ch_vsi; | |
8855 | ||
8856 | ch_vsi = ch->ch_vsi; | |
8857 | if (!ch_vsi) | |
8858 | continue; | |
8859 | ||
8860 | /* reconfig channel resources */ | |
8861 | ice_cfg_chnl_all_res(main_vsi, ch); | |
8862 | ||
8863 | /* replay BW rate limit if it is non-zero */ | |
8864 | if (!ch->max_tx_rate && !ch->min_tx_rate) | |
8865 | continue; | |
8866 | ||
8867 | err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, | |
8868 | ch->min_tx_rate); | |
8869 | if (err) | |
8870 | dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", | |
8871 | err, ch->max_tx_rate, ch->min_tx_rate, | |
8872 | ch_vsi->vsi_num); | |
8873 | else | |
8874 | dev_dbg(dev, "successfully rebuilt BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n", | |
8875 | ch->max_tx_rate, ch->min_tx_rate, | |
8876 | ch_vsi->vsi_num); | |
8877 | } | |
8878 | ||
8879 | /* reconfig RSS for main VSI */ | |
8880 | if (main_vsi->ch_rss_size) | |
8881 | ice_vsi_cfg_rss_lut_key(main_vsi); | |
8882 | ||
8883 | return 0; | |
8884 | ||
8885 | cleanup: | |
8886 | ice_remove_q_channels(main_vsi, rem_adv_fltr); | |
8887 | return err; | |
8888 | } | |
8889 | ||
8890 | /** | |
8891 | * ice_create_q_channels - Add queue channels for the given TCs | |
8892 | * @vsi: VSI to be configured | |
8893 | * | |
8894 | * Configures queue channel mapping to the given TCs | |
8895 | */ | |
8896 | static int ice_create_q_channels(struct ice_vsi *vsi) | |
8897 | { | |
8898 | struct ice_pf *pf = vsi->back; | |
8899 | struct ice_channel *ch; | |
8900 | int ret = 0, i; | |
8901 | ||
8902 | ice_for_each_chnl_tc(i) { | |
8903 | if (!(vsi->all_enatc & BIT(i))) | |
8904 | continue; | |
8905 | ||
8906 | ch = kzalloc(sizeof(*ch), GFP_KERNEL); | |
8907 | if (!ch) { | |
8908 | ret = -ENOMEM; | |
8909 | goto err_free; | |
8910 | } | |
8911 | INIT_LIST_HEAD(&ch->list); | |
8912 | ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; | |
8913 | ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; | |
8914 | ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; | |
8915 | ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; | |
8916 | ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; | |
8917 | ||
8918 | /* convert to Kbits/s */ | |
8919 | if (ch->max_tx_rate) | |
8920 | ch->max_tx_rate = div_u64(ch->max_tx_rate, | |
8921 | ICE_BW_KBPS_DIVISOR); | |
8922 | if (ch->min_tx_rate) | |
8923 | ch->min_tx_rate = div_u64(ch->min_tx_rate, | |
8924 | ICE_BW_KBPS_DIVISOR); | |
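| /* mqprio passes min_rate/max_rate in bytes per second (standard tc | |
| * rate encoding); assuming ICE_BW_KBPS_DIVISOR is 125 (bytes per | |
| * kbit), e.g. 12,500,000 B/s becomes 100,000 Kbps (100 Mbps). | |
| */ | |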
8925 | ||
8926 | ret = ice_create_q_channel(vsi, ch); | |
8927 | if (ret) { | |
8928 | dev_err(ice_pf_to_dev(pf), | |
8929 | "failed creating channel TC:%d\n", i); | |
8930 | kfree(ch); | |
8931 | goto err_free; | |
8932 | } | |
8933 | list_add_tail(&ch->list, &vsi->ch_list); | |
8934 | vsi->tc_map_vsi[i] = ch->ch_vsi; | |
8935 | dev_dbg(ice_pf_to_dev(pf), | |
8936 | "successfully created channel: VSI %pK\n", ch->ch_vsi); | |
8937 | } | |
8938 | return 0; | |
8939 | ||
8940 | err_free: | |
8941 | ice_remove_q_channels(vsi, false); | |
8942 | ||
8943 | return ret; | |
8944 | } | |
8945 | ||
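| /* For reference, this path is typically driven from userspace by the | |
| * mqprio qdisc in channel mode; an illustrative invocation (see | |
| * tc-mqprio(8)) requesting two ADQ TCs of four queues each: | |
| * | |
| *	tc qdisc add dev eth0 root mqprio num_tc 2 \ | |
| *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel | |
| */ | |
| ||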
8946 | /** | |
8947 | * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes | |
8948 | * @netdev: net device to configure | |
8949 | * @type_data: TC offload data | |
8950 | */ | |
8951 | static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data) | |
8952 | { | |
8953 | struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; | |
8954 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
8955 | struct ice_vsi *vsi = np->vsi; | |
8956 | struct ice_pf *pf = vsi->back; | |
8957 | u16 mode, ena_tc_qdisc = 0; | |
8958 | int cur_txq, cur_rxq; | |
8959 | u8 hw = 0, num_tcf; | |
8960 | struct device *dev; | |
8961 | int ret, i; | |
8962 | ||
8963 | dev = ice_pf_to_dev(pf); | |
8964 | num_tcf = mqprio_qopt->qopt.num_tc; | |
8965 | hw = mqprio_qopt->qopt.hw; | |
8966 | mode = mqprio_qopt->mode; | |
8967 | if (!hw) { | |
8968 | clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); | |
8969 | vsi->ch_rss_size = 0; | |
8970 | memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); | |
8971 | goto config_tcf; | |
8972 | } | |
8973 | ||
8974 | /* Generate queue region map for number of TCF requested */ | |
8975 | for (i = 0; i < num_tcf; i++) | |
8976 | ena_tc_qdisc |= BIT(i); | |
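| /* e.g. num_tcf == 3 yields ena_tc_qdisc == 0x7 (TC0-TC2 enabled) */ | |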
8977 | ||
8978 | switch (mode) { | |
8979 | case TC_MQPRIO_MODE_CHANNEL: | |
8980 | ||
80fe30a8 MW |
8981 | if (pf->hw.port_info->is_custom_tx_enabled) { |
8982 | dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n"); | |
8983 | return -EBUSY; | |
8984 | } | |
8985 | ice_tear_down_devlink_rate_tree(pf); | |
8986 | ||
fbc7b27a KP |
8987 | ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt); |
8988 | if (ret) { | |
8989 | netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n", | |
8990 | ret); | |
8991 | return ret; | |
8992 | } | |
8993 | memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); | |
8994 | set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); | |
9fea7498 KP |
8995 | /* don't assume the state of hw_tc_offload during driver load; | |
8996 | * set the TC flower filter flag if hw_tc_offload is | |
8997 | * already on | |
8998 | */ | |
8999 | if (vsi->netdev->features & NETIF_F_HW_TC) | |
9000 | set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); | |
fbc7b27a KP |
9001 | break; |
9002 | default: | |
9003 | return -EINVAL; | |
9004 | } | |
9005 | ||
9006 | config_tcf: | |
9007 | ||
9008 | /* Requesting same TCF configuration as already enabled */ | |
9009 | if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && | |
9010 | mode != TC_MQPRIO_MODE_CHANNEL) | |
9011 | return 0; | |
9012 | ||
9013 | /* Pause VSI queues */ | |
9014 | ice_dis_vsi(vsi, true); | |
9015 | ||
9016 | if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { | |
9017 | ice_remove_q_channels(vsi, true); | |
9018 | ||
9020 | vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), | |
9021 | num_online_cpus()); | |
9022 | vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), | |
9023 | num_online_cpus()); | |
9024 | } else { | |
9025 | /* logic to rebuild VSI, same as for ethtool -L */ | |
9026 | u16 offset = 0, qcount_tx = 0, qcount_rx = 0; | |
9027 | ||
9028 | for (i = 0; i < num_tcf; i++) { | |
9029 | if (!(ena_tc_qdisc & BIT(i))) | |
9030 | continue; | |
9031 | ||
9032 | offset = vsi->mqprio_qopt.qopt.offset[i]; | |
9033 | qcount_rx = vsi->mqprio_qopt.qopt.count[i]; | |
9034 | qcount_tx = vsi->mqprio_qopt.qopt.count[i]; | |
9035 | } | |
9036 | vsi->req_txq = offset + qcount_tx; | |
9037 | vsi->req_rxq = offset + qcount_rx; | |
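| /* e.g. with qopt.offset = {0, 4} and qopt.count = {4, 4}, the last | |
| * enabled TC leaves offset == 4 and qcount == 4, so req_txq and | |
| * req_rxq both become 8 | |
| */ | |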
9038 | ||
9039 | /* store away the original rss_size info so that it gets reused | |
9040 | * from ice_vsi_rebuild during the tc-qdisc delete stage to | |
9041 | * determine what the rss_size for the main VSI should be | |
9042 | */ | |
9043 | vsi->orig_rss_size = vsi->rss_size; | |
9044 | } | |
9045 | ||
9046 | /* save current values of Tx and Rx queues before calling VSI rebuild | |
9047 | * for fallback option | |
9048 | */ | |
9049 | cur_txq = vsi->num_txq; | |
9050 | cur_rxq = vsi->num_rxq; | |
9051 | ||
9052 | /* proceed with rebuilding the main VSI using the correct number of queues */ | |
6624e780 | 9053 | ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); |
fbc7b27a KP |
9054 | if (ret) { |
9055 | /* fallback to current number of queues */ | |
9056 | dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n"); | |
9057 | vsi->req_txq = cur_txq; | |
9058 | vsi->req_rxq = cur_rxq; | |
9059 | clear_bit(ICE_RESET_FAILED, pf->state); | |
6624e780 | 9060 | if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { |
fbc7b27a KP |
9061 | dev_err(dev, "Rebuild of main VSI failed again\n"); |
9062 | return ret; | |
9063 | } | |
9064 | } | |
9065 | ||
9066 | vsi->all_numtc = num_tcf; | |
9067 | vsi->all_enatc = ena_tc_qdisc; | |
9068 | ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc); | |
9069 | if (ret) { | |
9070 | netdev_err(netdev, "failed configuring TC for VSI id=%d\n", | |
9071 | vsi->vsi_num); | |
9072 | goto exit; | |
9073 | } | |
9074 | ||
9075 | if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { | |
9076 | u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; | |
9077 | u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; | |
9078 | ||
9079 | /* set TC0 rate limit if specified */ | |
9080 | if (max_tx_rate || min_tx_rate) { | |
9081 | /* convert to Kbits/s */ | |
9082 | if (max_tx_rate) | |
9083 | max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR); | |
9084 | if (min_tx_rate) | |
9085 | min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR); | |
9086 | ||
9087 | ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate); | |
9088 | if (!ret) { | |
9089 | dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n", | |
9090 | max_tx_rate, min_tx_rate, vsi->vsi_num); | |
9091 | } else { | |
9092 | dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n", | |
9093 | max_tx_rate, min_tx_rate, vsi->vsi_num); | |
9094 | goto exit; | |
9095 | } | |
9096 | } | |
9097 | ret = ice_create_q_channels(vsi); | |
9098 | if (ret) { | |
9099 | netdev_err(netdev, "failed configuring queue channels\n"); | |
9100 | goto exit; | |
9101 | } | |
9102 | netdev_dbg(netdev, "successfully configured channels\n"); | |
9104 | } | |
9105 | ||
9106 | if (vsi->ch_rss_size) | |
9107 | ice_vsi_cfg_rss_lut_key(vsi); | |
9108 | ||
9109 | exit: | |
9110 | /* on error, reset all_numtc and all_enatc */ | |
9111 | if (ret) { | |
9112 | vsi->all_numtc = 0; | |
9113 | vsi->all_enatc = 0; | |
9114 | } | |
9115 | /* resume VSI */ | |
9116 | ice_ena_vsi(vsi, true); | |
9117 | ||
9118 | return ret; | |
9119 | } | |
9120 | ||
0d08a441 KP |
9121 | static LIST_HEAD(ice_block_cb_list); |
9122 | ||
9123 | static int | |
9124 | ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, | |
9125 | void *type_data) | |
9126 | { | |
9127 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
fbc7b27a | 9128 | struct ice_pf *pf = np->vsi->back; |
4b31fd4d | 9129 | bool locked = false; |
fbc7b27a | 9130 | int err; |
0d08a441 KP |
9131 | |
9132 | switch (type) { | |
9133 | case TC_SETUP_BLOCK: | |
9134 | return flow_block_cb_setup_simple(type_data, | |
9135 | &ice_block_cb_list, | |
9136 | ice_setup_tc_block_cb, | |
9137 | np, np, true); | |
fbc7b27a | 9138 | case TC_SETUP_QDISC_MQPRIO: |
43d00e10 MS |
9139 | if (ice_is_eswitch_mode_switchdev(pf)) { |
9140 | netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); | |
9141 | return -EOPNOTSUPP; | |
9142 | } | |
9143 | ||
4b31fd4d RR |
9144 | if (pf->adev) { |
9145 | mutex_lock(&pf->adev_mutex); | |
9146 | device_lock(&pf->adev->dev); | |
9147 | locked = true; | |
9148 | if (pf->adev->dev.driver) { | |
9149 | netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); | |
9150 | err = -EBUSY; | |
9151 | goto adev_unlock; | |
9152 | } | |
9153 | } | |
9154 | ||
fbc7b27a KP |
9155 | /* setup traffic classifier for receive side */ |
9156 | mutex_lock(&pf->tc_mutex); | |
9157 | err = ice_setup_tc_mqprio_qdisc(netdev, type_data); | |
9158 | mutex_unlock(&pf->tc_mutex); | |
4b31fd4d RR |
9159 | |
9160 | adev_unlock: | |
9161 | if (locked) { | |
9162 | device_unlock(&pf->adev->dev); | |
9163 | mutex_unlock(&pf->adev_mutex); | |
9164 | } | |
fbc7b27a | 9165 | return err; |
0d08a441 KP |
9166 | default: |
9167 | return -EOPNOTSUPP; | |
9168 | } | |
9169 | return -EOPNOTSUPP; | |
9170 | } | |
9171 | ||
195bb48f MS |
9172 | static struct ice_indr_block_priv * |
9173 | ice_indr_block_priv_lookup(struct ice_netdev_priv *np, | |
9174 | struct net_device *netdev) | |
9175 | { | |
9176 | struct ice_indr_block_priv *cb_priv; | |
9177 | ||
9178 | list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { | |
9179 | if (!cb_priv->netdev) | |
9180 | return NULL; | |
9181 | if (cb_priv->netdev == netdev) | |
9182 | return cb_priv; | |
9183 | } | |
9184 | return NULL; | |
9185 | } | |
9186 | ||
9187 | static int | |
9188 | ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, | |
9189 | void *indr_priv) | |
9190 | { | |
9191 | struct ice_indr_block_priv *priv = indr_priv; | |
9192 | struct ice_netdev_priv *np = priv->np; | |
9193 | ||
9194 | switch (type) { | |
9195 | case TC_SETUP_CLSFLOWER: | |
9196 | return ice_setup_tc_cls_flower(np, priv->netdev, | |
9197 | (struct flow_cls_offload *) | |
9198 | type_data); | |
9199 | default: | |
9200 | return -EOPNOTSUPP; | |
9201 | } | |
9202 | } | |
9203 | ||
9204 | static int | |
9205 | ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch, | |
9206 | struct ice_netdev_priv *np, | |
9207 | struct flow_block_offload *f, void *data, | |
9208 | void (*cleanup)(struct flow_block_cb *block_cb)) | |
9209 | { | |
9210 | struct ice_indr_block_priv *indr_priv; | |
9211 | struct flow_block_cb *block_cb; | |
9212 | ||
9e300987 MS |
9213 | if (!ice_is_tunnel_supported(netdev) && |
9214 | !(is_vlan_dev(netdev) && | |
9215 | vlan_dev_real_dev(netdev) == np->vsi->netdev)) | |
9216 | return -EOPNOTSUPP; | |
9217 | ||
195bb48f MS |
9218 | if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
9219 | return -EOPNOTSUPP; | |
9220 | ||
9221 | switch (f->command) { | |
9222 | case FLOW_BLOCK_BIND: | |
9223 | indr_priv = ice_indr_block_priv_lookup(np, netdev); | |
9224 | if (indr_priv) | |
9225 | return -EEXIST; | |
9226 | ||
9227 | indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL); | |
9228 | if (!indr_priv) | |
9229 | return -ENOMEM; | |
9230 | ||
9231 | indr_priv->netdev = netdev; | |
9232 | indr_priv->np = np; | |
9233 | list_add(&indr_priv->list, &np->tc_indr_block_priv_list); | |
9234 | ||
9235 | block_cb = | |
9236 | flow_indr_block_cb_alloc(ice_indr_setup_block_cb, | |
9237 | indr_priv, indr_priv, | |
9238 | ice_rep_indr_tc_block_unbind, | |
9239 | f, netdev, sch, data, np, | |
9240 | cleanup); | |
9241 | ||
9242 | if (IS_ERR(block_cb)) { | |
9243 | list_del(&indr_priv->list); | |
9244 | kfree(indr_priv); | |
9245 | return PTR_ERR(block_cb); | |
9246 | } | |
9247 | flow_block_cb_add(block_cb, f); | |
9248 | list_add_tail(&block_cb->driver_list, &ice_block_cb_list); | |
9249 | break; | |
9250 | case FLOW_BLOCK_UNBIND: | |
9251 | indr_priv = ice_indr_block_priv_lookup(np, netdev); | |
9252 | if (!indr_priv) | |
9253 | return -ENOENT; | |
9254 | ||
9255 | block_cb = flow_block_cb_lookup(f->block, | |
9256 | ice_indr_setup_block_cb, | |
9257 | indr_priv); | |
9258 | if (!block_cb) | |
9259 | return -ENOENT; | |
9260 | ||
9261 | flow_indr_block_cb_remove(block_cb, f); | |
9262 | ||
9263 | list_del(&block_cb->driver_list); | |
9264 | break; | |
9265 | default: | |
9266 | return -EOPNOTSUPP; | |
9267 | } | |
9268 | return 0; | |
9269 | } | |
9270 | ||
9271 | static int | |
9272 | ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, | |
9273 | void *cb_priv, enum tc_setup_type type, void *type_data, | |
9274 | void *data, | |
9275 | void (*cleanup)(struct flow_block_cb *block_cb)) | |
9276 | { | |
9277 | switch (type) { | |
9278 | case TC_SETUP_BLOCK: | |
9279 | return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data, | |
9280 | data, cleanup); | |
9281 | ||
9282 | default: | |
9283 | return -EOPNOTSUPP; | |
9284 | } | |
9285 | } | |
9286 | ||
cdedef59 AV |
9287 | /** |
9288 | * ice_open - Called when a network interface becomes active | |
9289 | * @netdev: network interface device structure | |
9290 | * | |
9291 | * The open entry point is called when a network interface is made | |
df17b7e0 | 9292 | * active by the system (IFF_UP). At this point all resources needed |
cdedef59 AV |
9293 | * for transmit and receive operations are allocated, the interrupt |
9294 | * handler is registered with the OS, the netdev watchdog is enabled, | |
9295 | * and the stack is notified that the interface is ready. | |
9296 | * | |
9297 | * Returns 0 on success, negative value on failure | |
9298 | */ | |
0e674aeb | 9299 | int ice_open(struct net_device *netdev) |
e95fc857 KG |
9300 | { |
9301 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
9302 | struct ice_pf *pf = np->vsi->back; | |
9303 | ||
9304 | if (ice_is_reset_in_progress(pf->state)) { | |
9305 | netdev_err(netdev, "can't open net device while reset is in progress\n"); | |
9306 | return -EBUSY; | |
9307 | } | |
9308 | ||
9309 | return ice_open_internal(netdev); | |
9310 | } | |
9311 | ||
9312 | /** | |
9313 | * ice_open_internal - Called when a network interface becomes active | |
9314 | * @netdev: network interface device structure | |
9315 | * | |
9316 | * Internal ice_open implementation. Should not be called directly except | |
9317 | * by ice_open and the reset handling routine | |
9318 | * | |
9319 | * Returns 0 on success, negative value on failure | |
9320 | */ | |
9321 | int ice_open_internal(struct net_device *netdev) | |
cdedef59 AV |
9322 | { |
9323 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
9324 | struct ice_vsi *vsi = np->vsi; | |
de75135b | 9325 | struct ice_pf *pf = vsi->back; |
6d599946 | 9326 | struct ice_port_info *pi; |
cdedef59 AV |
9327 | int err; |
9328 | ||
7e408e07 | 9329 | if (test_bit(ICE_NEEDS_RESTART, pf->state)) { |
0f9d5027 AV |
9330 | netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); |
9331 | return -EIO; | |
9332 | } | |
9333 | ||
cdedef59 AV |
9334 | netif_carrier_off(netdev); |
9335 | ||
6d599946 | 9336 | pi = vsi->port_info; |
2ccc1c1c TN |
9337 | err = ice_update_link_info(pi); |
9338 | if (err) { | |
9339 | netdev_err(netdev, "Failed to get link info, error %d\n", err); | |
c1484691 | 9340 | return err; |
b6f934f0 | 9341 | } |
cdedef59 | 9342 | |
99d40752 | 9343 | ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); |
c77849f5 | 9344 | |
6d599946 TN |
9345 | /* Set PHY if there is media, otherwise, turn off PHY */ |
9346 | if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { | |
1a3571b5 | 9347 | clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
7e408e07 | 9348 | if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { |
1a3571b5 PG |
9349 | err = ice_init_phy_user_cfg(pi); |
9350 | if (err) { | |
9351 | netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", | |
9352 | err); | |
9353 | return err; | |
9354 | } | |
9355 | } | |
9356 | ||
9357 | err = ice_configure_phy(vsi); | |
6d599946 | 9358 | if (err) { |
19cce2c6 | 9359 | netdev_err(netdev, "Failed to set physical link up, error %d\n", |
6d599946 TN |
9360 | err); |
9361 | return err; | |
9362 | } | |
9363 | } else { | |
1a3571b5 | 9364 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
d348d517 | 9365 | ice_set_link(vsi, false); |
6d599946 TN |
9366 | } |
9367 | ||
b6f934f0 | 9368 | err = ice_vsi_open(vsi); |
cdedef59 AV |
9369 | if (err) |
9370 | netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", | |
9371 | vsi->vsi_num, vsi->vsw->sw_id); | |
a4e82a81 TN |
9372 | |
9373 | /* Update existing tunnels information */ | |
9374 | udp_tunnel_get_rx_info(netdev); | |
9375 | ||
cdedef59 AV |
9376 | return err; |
9377 | } | |
9378 | ||
9379 | /** | |
9380 | * ice_stop - Disables a network interface | |
9381 | * @netdev: network interface device structure | |
9382 | * | |
9383 | * The stop entry point is called when an interface is de-activated by the OS, | |
df17b7e0 | 9384 | * and the netdevice enters the DOWN state. The hardware is still under the |
cdedef59 AV |
9385 | * driver's control, but the netdev interface is disabled. |
9386 | * | |
9387 | * Returns success only - not allowed to fail | |
9388 | */ | |
0e674aeb | 9389 | int ice_stop(struct net_device *netdev) |
cdedef59 AV |
9390 | { |
9391 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
9392 | struct ice_vsi *vsi = np->vsi; | |
e95fc857 KG |
9393 | struct ice_pf *pf = vsi->back; |
9394 | ||
9395 | if (ice_is_reset_in_progress(pf->state)) { | |
9396 | netdev_err(netdev, "can't stop net device while reset is in progress\n"); | |
9397 | return -EBUSY; | |
9398 | } | |
cdedef59 | 9399 | |
8ac71327 MP |
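| /* ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA is controlled via an ethtool | |
| * private flag; assuming the stock flag name, an illustrative toggle: | |
| * | |
| *	ethtool --set-priv-flags eth0 link-down-on-close on | |
| */ | |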
9400 | if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { |
9401 | int link_err = ice_force_phys_link_state(vsi, false); | |
9402 | ||
9403 | if (link_err) { | |
6a8d8bb5 KW |
9404 | if (link_err == -ENOMEDIUM) |
9405 | netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", | |
9406 | vsi->vsi_num); | |
9407 | else | |
9408 | netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", | |
9409 | vsi->vsi_num, link_err); | |
6d05ff55 NMK |
9410 | |
9411 | ice_vsi_close(vsi); | |
8ac71327 MP |
9412 | return -EIO; |
9413 | } | |
9414 | } | |
9415 | ||
cdedef59 AV |
9416 | ice_vsi_close(vsi); |
9417 | ||
9418 | return 0; | |
9419 | } | |
9420 | ||
e94d4478 AV |
9421 | /** |
9422 | * ice_features_check - Validate encapsulated packet conforms to limits | |
9423 | * @skb: skb buffer | |
9424 | * @netdev: This port's netdev | |
9425 | * @features: Offload features that the stack believes apply | |
9426 | */ | |
9427 | static netdev_features_t | |
9428 | ice_features_check(struct sk_buff *skb, | |
9429 | struct net_device __always_unused *netdev, | |
9430 | netdev_features_t features) | |
9431 | { | |
46b699c5 | 9432 | bool gso = skb_is_gso(skb); |
e94d4478 AV |
9433 | size_t len; |
9434 | ||
9435 | /* No point in doing any of this if neither checksum nor GSO are | |
df17b7e0 | 9436 | * being requested for this frame. We can rule out both by just |
e94d4478 AV |
9437 | * checking for CHECKSUM_PARTIAL |
9438 | */ | |
9439 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
9440 | return features; | |
9441 | ||
9442 | /* We cannot support GSO if the MSS is going to be less than | |
df17b7e0 | 9443 | * 64 bytes. If it is then we need to drop support for GSO. |
e94d4478 | 9444 | */ |
46b699c5 | 9445 | if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) |
e94d4478 AV |
9446 | features &= ~NETIF_F_GSO_MASK; |
9447 | ||
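| /* The length checks below reject headers that are too long or odd in | |
| * length; a plausible reading (our assumption, not stated in this | |
| * file) is that the Tx descriptor encodes header lengths with 2-byte | |
| * granularity, so odd lengths are unrepresentable and must fall back | |
| * to software offloads. | |
| */ | |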
46b699c5 | 9448 | len = skb_network_offset(skb); |
a4e82a81 | 9449 | if (len > ICE_TXD_MACLEN_MAX || len & 0x1) |
e94d4478 AV |
9450 | goto out_rm_features; |
9451 | ||
46b699c5 | 9452 | len = skb_network_header_len(skb); |
a4e82a81 | 9453 | if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
e94d4478 AV |
9454 | goto out_rm_features; |
9455 | ||
9456 | if (skb->encapsulation) { | |
46b699c5 JB |
9457 | /* this must work for VXLAN frames AND IPIP/SIT frames, and in |
9458 | * the case of IPIP frames, the transport header pointer is | |
9459 | * after the inner header! So check to make sure that this | |
9460 | * is a GRE or UDP_TUNNEL frame before doing that math. | |
9461 | */ | |
9462 | if (gso && (skb_shinfo(skb)->gso_type & | |
9463 | (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { | |
9464 | len = skb_inner_network_header(skb) - | |
9465 | skb_transport_header(skb); | |
9466 | if (len > ICE_TXD_L4LEN_MAX || len & 0x1) | |
9467 | goto out_rm_features; | |
9468 | } | |
e94d4478 | 9469 | |
46b699c5 | 9470 | len = skb_inner_network_header_len(skb); |
a4e82a81 | 9471 | if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
e94d4478 AV |
9472 | goto out_rm_features; |
9473 | } | |
9474 | ||
9475 | return features; | |
9476 | out_rm_features: | |
9477 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); | |
9478 | } | |
9479 | ||
462acf6a TN |
9480 | static const struct net_device_ops ice_netdev_safe_mode_ops = { |
9481 | .ndo_open = ice_open, | |
9482 | .ndo_stop = ice_stop, | |
9483 | .ndo_start_xmit = ice_start_xmit, | |
9484 | .ndo_set_mac_address = ice_set_mac_address, | |
9485 | .ndo_validate_addr = eth_validate_addr, | |
9486 | .ndo_change_mtu = ice_change_mtu, | |
9487 | .ndo_get_stats64 = ice_get_stats64, | |
9488 | .ndo_tx_timeout = ice_tx_timeout, | |
ebc5399e | 9489 | .ndo_bpf = ice_xdp_safe_mode, |
462acf6a TN |
9490 | }; |
9491 | ||
cdedef59 AV |
9492 | static const struct net_device_ops ice_netdev_ops = { |
9493 | .ndo_open = ice_open, | |
9494 | .ndo_stop = ice_stop, | |
2b245cb2 | 9495 | .ndo_start_xmit = ice_start_xmit, |
2a87bd73 | 9496 | .ndo_select_queue = ice_select_queue, |
e94d4478 | 9497 | .ndo_features_check = ice_features_check, |
1babaf77 | 9498 | .ndo_fix_features = ice_fix_features, |
e94d4478 AV |
9499 | .ndo_set_rx_mode = ice_set_rx_mode, |
9500 | .ndo_set_mac_address = ice_set_mac_address, | |
9501 | .ndo_validate_addr = eth_validate_addr, | |
9502 | .ndo_change_mtu = ice_change_mtu, | |
fcea6f3d | 9503 | .ndo_get_stats64 = ice_get_stats64, |
1ddef455 | 9504 | .ndo_set_tx_maxrate = ice_set_tx_maxrate, |
a7605370 | 9505 | .ndo_eth_ioctl = ice_eth_ioctl, |
7c710869 AV |
9506 | .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, |
9507 | .ndo_set_vf_mac = ice_set_vf_mac, | |
9508 | .ndo_get_vf_config = ice_get_vf_cfg, | |
9509 | .ndo_set_vf_trust = ice_set_vf_trust, | |
9510 | .ndo_set_vf_vlan = ice_set_vf_port_vlan, | |
9511 | .ndo_set_vf_link_state = ice_set_vf_link_state, | |
730fdea4 | 9512 | .ndo_get_vf_stats = ice_get_vf_stats, |
4ecc8633 | 9513 | .ndo_set_vf_rate = ice_set_vf_bw, |
d76a60ba AV |
9514 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
9515 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, | |
0d08a441 | 9516 | .ndo_setup_tc = ice_setup_tc, |
d76a60ba | 9517 | .ndo_set_features = ice_set_features, |
b1edc14a MFIP |
9518 | .ndo_bridge_getlink = ice_bridge_getlink, |
9519 | .ndo_bridge_setlink = ice_bridge_setlink, | |
e94d4478 AV |
9520 | .ndo_fdb_add = ice_fdb_add, |
9521 | .ndo_fdb_del = ice_fdb_del, | |
28bf2672 BC |
9522 | #ifdef CONFIG_RFS_ACCEL |
9523 | .ndo_rx_flow_steer = ice_rx_flow_steer, | |
9524 | #endif | |
b3969fd7 | 9525 | .ndo_tx_timeout = ice_tx_timeout, |
efc2214b MF |
9526 | .ndo_bpf = ice_xdp, |
9527 | .ndo_xdp_xmit = ice_xdp_xmit, | |
2d4238f5 | 9528 | .ndo_xsk_wakeup = ice_xsk_wakeup, |
cdedef59 | 9529 | }; |