Commit | Line | Data |
---|---|---|
837f08fd AV |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2018, Intel Corporation. */ | |
3 | ||
4 | /* Intel(R) Ethernet Connection E800 Series Linux Driver */ | |
5 | ||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
7 | ||
34a2a3b8 | 8 | #include <generated/utsrelease.h> |
837f08fd | 9 | #include "ice.h" |
eff380aa | 10 | #include "ice_base.h" |
45d3d428 | 11 | #include "ice_lib.h" |
1b8f15b6 | 12 | #include "ice_fltr.h" |
37b6f646 | 13 | #include "ice_dcb_lib.h" |
b94b013e | 14 | #include "ice_dcb_nl.h" |
1adf7ead | 15 | #include "ice_devlink.h" |
3089cf6d JB |
16 | /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the |
17 | * ice tracepoint functions. This must be done exactly once across the | |
18 | * ice driver. | |
19 | */ | |
20 | #define CREATE_TRACE_POINTS | |
21 | #include "ice_trace.h" | |
837f08fd | 22 | |
837f08fd | 23 | #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" |
837f08fd AV |
24 | static const char ice_driver_string[] = DRV_SUMMARY; |
25 | static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; | |
26 | ||
462acf6a TN |
27 | /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */ |
28 | #define ICE_DDP_PKG_PATH "intel/ice/ddp/" | |
29 | #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" | |
30 | ||
837f08fd AV |
31 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
32 | MODULE_DESCRIPTION(DRV_SUMMARY); | |
98674ebe | 33 | MODULE_LICENSE("GPL v2"); |
462acf6a | 34 | MODULE_FIRMWARE(ICE_DDP_PKG_FILE); |
837f08fd AV |
35 | |
36 | static int debug = -1; | |
37 | module_param(debug, int, 0644); | |
7ec59eea AV |
38 | #ifndef CONFIG_DYNAMIC_DEBUG |
39 | MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)"); | |
40 | #else | |
41 | MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); | |
42 | #endif /* !CONFIG_DYNAMIC_DEBUG */ | |
837f08fd | 43 | |
d25a0fc4 DE |
44 | static DEFINE_IDA(ice_aux_ida); |
45 | ||
940b61af | 46 | static struct workqueue_struct *ice_wq; |
462acf6a | 47 | static const struct net_device_ops ice_netdev_safe_mode_ops; |
cdedef59 | 48 | static const struct net_device_ops ice_netdev_ops; |
87324e74 | 49 | static int ice_vsi_open(struct ice_vsi *vsi); |
940b61af | 50 | |
462acf6a | 51 | static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); |
28c2a645 | 52 | |
0f9d5027 | 53 | static void ice_vsi_release_all(struct ice_pf *pf); |
3a858ba3 | 54 | |
df006dd4 DE |
55 | bool netif_is_ice(struct net_device *dev) |
56 | { | |
57 | return dev && (dev->netdev_ops == &ice_netdev_ops); | |
58 | } | |
59 | ||
b3969fd7 SM |
60 | /** |
61 | * ice_get_tx_pending - returns number of Tx descriptors not processed | |
62 | * @ring: the ring of descriptors | |
63 | */ | |
c1ddf1f5 | 64 | static u16 ice_get_tx_pending(struct ice_ring *ring) |
b3969fd7 | 65 | { |
c1ddf1f5 | 66 | u16 head, tail; |
b3969fd7 SM |
67 | |
68 | head = ring->next_to_clean; | |
c1ddf1f5 | 69 | tail = ring->next_to_use; |
b3969fd7 SM |
70 | |
71 | if (head != tail) | |
72 | return (head < tail) ? | |
73 | tail - head : (tail + ring->count - head); | |
74 | return 0; | |
75 | } | |
76 | ||
77 | /** | |
78 | * ice_check_for_hang_subtask - check for and recover hung queues | |
79 | * @pf: pointer to PF struct | |
80 | */ | |
81 | static void ice_check_for_hang_subtask(struct ice_pf *pf) | |
82 | { | |
83 | struct ice_vsi *vsi = NULL; | |
e89e899f | 84 | struct ice_hw *hw; |
b3969fd7 | 85 | unsigned int i; |
b3969fd7 | 86 | int packets; |
e89e899f | 87 | u32 v; |
b3969fd7 SM |
88 | |
89 | ice_for_each_vsi(pf, v) | |
90 | if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { | |
91 | vsi = pf->vsi[v]; | |
92 | break; | |
93 | } | |
94 | ||
e97fb1ae | 95 | if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state)) |
b3969fd7 SM |
96 | return; |
97 | ||
98 | if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) | |
99 | return; | |
100 | ||
e89e899f BC |
101 | hw = &vsi->back->hw; |
102 | ||
b3969fd7 SM |
103 | for (i = 0; i < vsi->num_txq; i++) { |
104 | struct ice_ring *tx_ring = vsi->tx_rings[i]; | |
105 | ||
106 | if (tx_ring && tx_ring->desc) { | |
b3969fd7 SM |
107 | /* If packet counter has not changed the queue is |
108 | * likely stalled, so force an interrupt for this | |
109 | * queue. | |
110 | * | |
111 | * prev_pkt would be negative if there was no | |
112 | * pending work. | |
113 | */ | |
114 | packets = tx_ring->stats.pkts & INT_MAX; | |
115 | if (tx_ring->tx_stats.prev_pkt == packets) { | |
116 | /* Trigger sw interrupt to revive the queue */ | |
e89e899f | 117 | ice_trigger_sw_intr(hw, tx_ring->q_vector); |
b3969fd7 SM |
118 | continue; |
119 | } | |
120 | ||
121 | /* Memory barrier between read of packet count and call | |
122 | * to ice_get_tx_pending() | |
123 | */ | |
124 | smp_rmb(); | |
125 | tx_ring->tx_stats.prev_pkt = | |
126 | ice_get_tx_pending(tx_ring) ? packets : -1; | |
127 | } | |
128 | } | |
129 | } | |
130 | ||
561f4379 TN |
131 | /** |
132 | * ice_init_mac_fltr - Set initial MAC filters | |
133 | * @pf: board private structure | |
134 | * | |
2f2da36e | 135 | * Set initial set of MAC filters for PF VSI; configure filters for permanent |
561f4379 TN |
136 | * address and broadcast address. If an error is encountered, netdevice will be |
137 | * unregistered. | |
138 | */ | |
139 | static int ice_init_mac_fltr(struct ice_pf *pf) | |
140 | { | |
bbb968e8 | 141 | enum ice_status status; |
561f4379 | 142 | struct ice_vsi *vsi; |
1b8f15b6 | 143 | u8 *perm_addr; |
561f4379 | 144 | |
208ff751 | 145 | vsi = ice_get_main_vsi(pf); |
561f4379 TN |
146 | if (!vsi) |
147 | return -EINVAL; | |
148 | ||
1b8f15b6 MS |
149 | perm_addr = vsi->port_info->mac.perm_addr; |
150 | status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI); | |
1e23f076 AV |
151 | if (status) |
152 | return -EIO; | |
561f4379 | 153 | |
1e23f076 | 154 | return 0; |
561f4379 TN |
155 | } |
156 | ||
e94d4478 | 157 | /** |
f9867df6 | 158 | * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced |
e94d4478 | 159 | * @netdev: the net device on which the sync is happening |
f9867df6 | 160 | * @addr: MAC address to sync |
e94d4478 AV |
161 | * |
162 | * This is a callback function which is called by the in kernel device sync | |
163 | * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only | |
164 | * populates the tmp_sync_list, which is later used by ice_add_mac to add the | |
f9867df6 | 165 | * MAC filters from the hardware. |
e94d4478 AV |
166 | */ |
167 | static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr) | |
168 | { | |
169 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
170 | struct ice_vsi *vsi = np->vsi; | |
171 | ||
1b8f15b6 MS |
172 | if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr, |
173 | ICE_FWD_TO_VSI)) | |
e94d4478 AV |
174 | return -EINVAL; |
175 | ||
176 | return 0; | |
177 | } | |
178 | ||
179 | /** | |
f9867df6 | 180 | * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced |
e94d4478 | 181 | * @netdev: the net device on which the unsync is happening |
f9867df6 | 182 | * @addr: MAC address to unsync |
e94d4478 AV |
183 | * |
184 | * This is a callback function which is called by the in kernel device unsync | |
185 | * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only | |
186 | * populates the tmp_unsync_list, which is later used by ice_remove_mac to | |
f9867df6 | 187 | * delete the MAC filters from the hardware. |
e94d4478 AV |
188 | */ |
189 | static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr) | |
190 | { | |
191 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
192 | struct ice_vsi *vsi = np->vsi; | |
193 | ||
3ba7f53f BC |
194 | /* Under some circumstances, we might receive a request to delete our |
195 | * own device address from our uc list. Because we store the device | |
196 | * address in the VSI's MAC filter list, we need to ignore such | |
197 | * requests and not delete our device address from this list. | |
198 | */ | |
199 | if (ether_addr_equal(addr, netdev->dev_addr)) | |
200 | return 0; | |
201 | ||
1b8f15b6 MS |
202 | if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr, |
203 | ICE_FWD_TO_VSI)) | |
e94d4478 AV |
204 | return -EINVAL; |
205 | ||
206 | return 0; | |
207 | } | |
208 | ||
e94d4478 AV |
209 | /** |
210 | * ice_vsi_fltr_changed - check if filter state changed | |
211 | * @vsi: VSI to be checked | |
212 | * | |
213 | * returns true if filter state has changed, false otherwise. | |
214 | */ | |
215 | static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) | |
216 | { | |
e97fb1ae AV |
217 | return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) || |
218 | test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) || | |
219 | test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); | |
e94d4478 AV |
220 | } |
221 | ||
5eda8afd AA |
222 | /** |
223 | * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF | |
224 | * @vsi: the VSI being configured | |
225 | * @promisc_m: mask of promiscuous config bits | |
226 | * @set_promisc: enable or disable promisc flag request | |
227 | * | |
228 | */ | |
229 | static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) | |
230 | { | |
231 | struct ice_hw *hw = &vsi->back->hw; | |
232 | enum ice_status status = 0; | |
233 | ||
234 | if (vsi->type != ICE_VSI_PF) | |
235 | return 0; | |
236 | ||
bcf68ea1 | 237 | if (vsi->num_vlan > 1) { |
5eda8afd AA |
238 | status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, |
239 | set_promisc); | |
240 | } else { | |
241 | if (set_promisc) | |
242 | status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m, | |
243 | 0); | |
244 | else | |
245 | status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m, | |
246 | 0); | |
247 | } | |
248 | ||
249 | if (status) | |
250 | return -EIO; | |
251 | ||
252 | return 0; | |
253 | } | |
254 | ||
e94d4478 AV |
255 | /** |
256 | * ice_vsi_sync_fltr - Update the VSI filter list to the HW | |
257 | * @vsi: ptr to the VSI | |
258 | * | |
259 | * Push any outstanding VSI filter changes through the AdminQ. | |
260 | */ | |
261 | static int ice_vsi_sync_fltr(struct ice_vsi *vsi) | |
262 | { | |
9a946843 | 263 | struct device *dev = ice_pf_to_dev(vsi->back); |
e94d4478 AV |
264 | struct net_device *netdev = vsi->netdev; |
265 | bool promisc_forced_on = false; | |
266 | struct ice_pf *pf = vsi->back; | |
267 | struct ice_hw *hw = &pf->hw; | |
268 | enum ice_status status = 0; | |
269 | u32 changed_flags = 0; | |
5eda8afd | 270 | u8 promisc_m; |
e94d4478 AV |
271 | int err = 0; |
272 | ||
273 | if (!vsi->netdev) | |
274 | return -EINVAL; | |
275 | ||
7e408e07 | 276 | while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) |
e94d4478 AV |
277 | usleep_range(1000, 2000); |
278 | ||
279 | changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; | |
280 | vsi->current_netdev_flags = vsi->netdev->flags; | |
281 | ||
282 | INIT_LIST_HEAD(&vsi->tmp_sync_list); | |
283 | INIT_LIST_HEAD(&vsi->tmp_unsync_list); | |
284 | ||
285 | if (ice_vsi_fltr_changed(vsi)) { | |
e97fb1ae AV |
286 | clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); |
287 | clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); | |
288 | clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); | |
e94d4478 AV |
289 | |
290 | /* grab the netdev's addr_list_lock */ | |
291 | netif_addr_lock_bh(netdev); | |
292 | __dev_uc_sync(netdev, ice_add_mac_to_sync_list, | |
293 | ice_add_mac_to_unsync_list); | |
294 | __dev_mc_sync(netdev, ice_add_mac_to_sync_list, | |
295 | ice_add_mac_to_unsync_list); | |
296 | /* our temp lists are populated. release lock */ | |
297 | netif_addr_unlock_bh(netdev); | |
298 | } | |
299 | ||
f9867df6 | 300 | /* Remove MAC addresses in the unsync list */ |
1b8f15b6 MS |
301 | status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list); |
302 | ice_fltr_free_list(dev, &vsi->tmp_unsync_list); | |
e94d4478 AV |
303 | if (status) { |
304 | netdev_err(netdev, "Failed to delete MAC filters\n"); | |
305 | /* if we failed because of alloc failures, just bail */ | |
306 | if (status == ICE_ERR_NO_MEMORY) { | |
307 | err = -ENOMEM; | |
308 | goto out; | |
309 | } | |
310 | } | |
311 | ||
f9867df6 | 312 | /* Add MAC addresses in the sync list */ |
1b8f15b6 MS |
313 | status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list); |
314 | ice_fltr_free_list(dev, &vsi->tmp_sync_list); | |
89f3e4a5 PB |
315 | /* If filter is added successfully or already exists, do not go into |
316 | * 'if' condition and report it as error. Instead continue processing | |
317 | * rest of the function. | |
318 | */ | |
319 | if (status && status != ICE_ERR_ALREADY_EXISTS) { | |
e94d4478 | 320 | netdev_err(netdev, "Failed to add MAC filters\n"); |
f9867df6 | 321 | /* If there is no more space for new umac filters, VSI |
e94d4478 AV |
322 | * should go into promiscuous mode. There should be some |
323 | * space reserved for promiscuous filters. | |
324 | */ | |
325 | if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC && | |
7e408e07 | 326 | !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC, |
e94d4478 AV |
327 | vsi->state)) { |
328 | promisc_forced_on = true; | |
19cce2c6 | 329 | netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n", |
e94d4478 AV |
330 | vsi->vsi_num); |
331 | } else { | |
332 | err = -EIO; | |
333 | goto out; | |
334 | } | |
335 | } | |
336 | /* check for changes in promiscuous modes */ | |
5eda8afd AA |
337 | if (changed_flags & IFF_ALLMULTI) { |
338 | if (vsi->current_netdev_flags & IFF_ALLMULTI) { | |
bcf68ea1 | 339 | if (vsi->num_vlan > 1) |
5eda8afd AA |
340 | promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; |
341 | else | |
342 | promisc_m = ICE_MCAST_PROMISC_BITS; | |
343 | ||
344 | err = ice_cfg_promisc(vsi, promisc_m, true); | |
345 | if (err) { | |
346 | netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n", | |
347 | vsi->vsi_num); | |
348 | vsi->current_netdev_flags &= ~IFF_ALLMULTI; | |
349 | goto out_promisc; | |
350 | } | |
92ace482 BA |
351 | } else { |
352 | /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ | |
bcf68ea1 | 353 | if (vsi->num_vlan > 1) |
5eda8afd AA |
354 | promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; |
355 | else | |
356 | promisc_m = ICE_MCAST_PROMISC_BITS; | |
357 | ||
358 | err = ice_cfg_promisc(vsi, promisc_m, false); | |
359 | if (err) { | |
360 | netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n", | |
361 | vsi->vsi_num); | |
362 | vsi->current_netdev_flags |= IFF_ALLMULTI; | |
363 | goto out_promisc; | |
364 | } | |
365 | } | |
366 | } | |
e94d4478 AV |
367 | |
368 | if (((changed_flags & IFF_PROMISC) || promisc_forced_on) || | |
e97fb1ae AV |
369 | test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) { |
370 | clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); | |
e94d4478 | 371 | if (vsi->current_netdev_flags & IFF_PROMISC) { |
f9867df6 | 372 | /* Apply Rx filter rule to get traffic from wire */ |
fc0f39bc BC |
373 | if (!ice_is_dflt_vsi_in_use(pf->first_sw)) { |
374 | err = ice_set_dflt_vsi(pf->first_sw, vsi); | |
375 | if (err && err != -EEXIST) { | |
19cce2c6 | 376 | netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n", |
fc0f39bc BC |
377 | err, vsi->vsi_num); |
378 | vsi->current_netdev_flags &= | |
379 | ~IFF_PROMISC; | |
380 | goto out_promisc; | |
381 | } | |
68d210a6 | 382 | ice_cfg_vlan_pruning(vsi, false, false); |
e94d4478 AV |
383 | } |
384 | } else { | |
f9867df6 | 385 | /* Clear Rx filter to remove traffic from wire */ |
fc0f39bc BC |
386 | if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) { |
387 | err = ice_clear_dflt_vsi(pf->first_sw); | |
388 | if (err) { | |
19cce2c6 | 389 | netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n", |
fc0f39bc BC |
390 | err, vsi->vsi_num); |
391 | vsi->current_netdev_flags |= | |
392 | IFF_PROMISC; | |
393 | goto out_promisc; | |
394 | } | |
68d210a6 NN |
395 | if (vsi->num_vlan > 1) |
396 | ice_cfg_vlan_pruning(vsi, true, false); | |
e94d4478 AV |
397 | } |
398 | } | |
399 | } | |
400 | goto exit; | |
401 | ||
402 | out_promisc: | |
e97fb1ae | 403 | set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state); |
e94d4478 AV |
404 | goto exit; |
405 | out: | |
406 | /* if something went wrong then set the changed flag so we try again */ | |
e97fb1ae AV |
407 | set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); |
408 | set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); | |
e94d4478 | 409 | exit: |
7e408e07 | 410 | clear_bit(ICE_CFG_BUSY, vsi->state); |
e94d4478 AV |
411 | return err; |
412 | } | |
413 | ||
414 | /** | |
415 | * ice_sync_fltr_subtask - Sync the VSI filter list with HW | |
416 | * @pf: board private structure | |
417 | */ | |
418 | static void ice_sync_fltr_subtask(struct ice_pf *pf) | |
419 | { | |
420 | int v; | |
421 | ||
422 | if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags))) | |
423 | return; | |
424 | ||
425 | clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags); | |
426 | ||
80ed404a | 427 | ice_for_each_vsi(pf, v) |
e94d4478 AV |
428 | if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) && |
429 | ice_vsi_sync_fltr(pf->vsi[v])) { | |
430 | /* come back and try again later */ | |
431 | set_bit(ICE_FLAG_FLTR_SYNC, pf->flags); | |
432 | break; | |
433 | } | |
434 | } | |
435 | ||
7b9ffc76 AV |
436 | /** |
437 | * ice_pf_dis_all_vsi - Pause all VSIs on a PF | |
438 | * @pf: the PF | |
439 | * @locked: is the rtnl_lock already held | |
440 | */ | |
7b9ffc76 | 441 | static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) |
7b9ffc76 | 442 | { |
b126bd6b | 443 | int node; |
7b9ffc76 AV |
444 | int v; |
445 | ||
446 | ice_for_each_vsi(pf, v) | |
447 | if (pf->vsi[v]) | |
448 | ice_dis_vsi(pf->vsi[v], locked); | |
b126bd6b KP |
449 | |
450 | for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++) | |
451 | pf->pf_agg_node[node].num_vsis = 0; | |
452 | ||
453 | for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++) | |
454 | pf->vf_agg_node[node].num_vsis = 0; | |
7b9ffc76 AV |
455 | } |
456 | ||
0b28b702 AV |
457 | /** |
458 | * ice_prepare_for_reset - prep for the core to reset | |
459 | * @pf: board private structure | |
460 | * | |
461 | * Inform or close all dependent features in prep for reset. | |
462 | */ | |
463 | static void | |
464 | ice_prepare_for_reset(struct ice_pf *pf) | |
465 | { | |
466 | struct ice_hw *hw = &pf->hw; | |
c1e08830 | 467 | unsigned int i; |
0b28b702 | 468 | |
5abac9d7 | 469 | /* already prepared for reset */ |
7e408e07 | 470 | if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) |
5abac9d7 BC |
471 | return; |
472 | ||
f9f5301e DE |
473 | ice_unplug_aux_dev(pf); |
474 | ||
007676b4 AV |
475 | /* Notify VFs of impending reset */ |
476 | if (ice_check_sq_alive(hw, &hw->mailboxq)) | |
477 | ice_vc_notify_reset(pf); | |
478 | ||
c7aeb4d1 | 479 | /* Disable VFs until reset is completed */ |
005881bc | 480 | ice_for_each_vf(pf, i) |
77ca27c4 | 481 | ice_set_vf_state_qs_dis(&pf->vf[i]); |
c7aeb4d1 | 482 | |
462acf6a TN |
483 | /* clear SW filtering DB */ |
484 | ice_clear_hw_tbls(hw); | |
0b28b702 | 485 | /* disable the VSIs and their queues that are not already DOWN */ |
7b9ffc76 | 486 | ice_pf_dis_all_vsi(pf, false); |
0b28b702 | 487 | |
06c16d89 JK |
488 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) |
489 | ice_ptp_release(pf); | |
490 | ||
c5a2a4a3 UK |
491 | if (hw->port_info) |
492 | ice_sched_clear_port(hw->port_info); | |
493 | ||
0b28b702 | 494 | ice_shutdown_all_ctrlq(hw); |
0f9d5027 | 495 | |
7e408e07 | 496 | set_bit(ICE_PREPARED_FOR_RESET, pf->state); |
0b28b702 AV |
497 | } |
498 | ||
499 | /** | |
500 | * ice_do_reset - Initiate one of many types of resets | |
501 | * @pf: board private structure | |
502 | * @reset_type: reset type requested | |
503 | * before this function was called. | |
504 | */ | |
505 | static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) | |
506 | { | |
4015d11e | 507 | struct device *dev = ice_pf_to_dev(pf); |
0b28b702 AV |
508 | struct ice_hw *hw = &pf->hw; |
509 | ||
510 | dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); | |
0b28b702 | 511 | |
0f9d5027 | 512 | ice_prepare_for_reset(pf); |
0b28b702 AV |
513 | |
514 | /* trigger the reset */ | |
515 | if (ice_reset(hw, reset_type)) { | |
516 | dev_err(dev, "reset %d failed\n", reset_type); | |
7e408e07 AV |
517 | set_bit(ICE_RESET_FAILED, pf->state); |
518 | clear_bit(ICE_RESET_OICR_RECV, pf->state); | |
519 | clear_bit(ICE_PREPARED_FOR_RESET, pf->state); | |
520 | clear_bit(ICE_PFR_REQ, pf->state); | |
521 | clear_bit(ICE_CORER_REQ, pf->state); | |
522 | clear_bit(ICE_GLOBR_REQ, pf->state); | |
1c08052e | 523 | wake_up(&pf->reset_wait_queue); |
0b28b702 AV |
524 | return; |
525 | } | |
526 | ||
0f9d5027 AV |
527 | /* PFR is a bit of a special case because it doesn't result in an OICR |
528 | * interrupt. So for PFR, rebuild after the reset and clear the reset- | |
529 | * associated state bits. | |
530 | */ | |
0b28b702 AV |
531 | if (reset_type == ICE_RESET_PFR) { |
532 | pf->pfr_count++; | |
462acf6a | 533 | ice_rebuild(pf, reset_type); |
7e408e07 AV |
534 | clear_bit(ICE_PREPARED_FOR_RESET, pf->state); |
535 | clear_bit(ICE_PFR_REQ, pf->state); | |
1c08052e | 536 | wake_up(&pf->reset_wait_queue); |
1c44e3bc | 537 | ice_reset_all_vfs(pf, true); |
0b28b702 AV |
538 | } |
539 | } | |
540 | ||
541 | /** | |
542 | * ice_reset_subtask - Set up for resetting the device and driver | |
543 | * @pf: board private structure | |
544 | */ | |
545 | static void ice_reset_subtask(struct ice_pf *pf) | |
546 | { | |
0f9d5027 | 547 | enum ice_reset_req reset_type = ICE_RESET_INVAL; |
0b28b702 AV |
548 | |
549 | /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an | |
0f9d5027 AV |
550 | * OICR interrupt. The OICR handler (ice_misc_intr) determines what type |
551 | * of reset is pending and sets bits in pf->state indicating the reset | |
7e408e07 | 552 | * type and ICE_RESET_OICR_RECV. So, if the latter bit is set |
0f9d5027 AV |
553 | * prepare for pending reset if not already (for PF software-initiated |
554 | * global resets the software should already be prepared for it as | |
7e408e07 | 555 | * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated |
0f9d5027 AV |
556 | * by firmware or software on other PFs, that bit is not set so prepare |
557 | * for the reset now), poll for reset done, rebuild and return. | |
0b28b702 | 558 | */ |
7e408e07 | 559 | if (test_bit(ICE_RESET_OICR_RECV, pf->state)) { |
2ebd4428 | 560 | /* Perform the largest reset requested */ |
7e408e07 | 561 | if (test_and_clear_bit(ICE_CORER_RECV, pf->state)) |
2ebd4428 | 562 | reset_type = ICE_RESET_CORER; |
7e408e07 | 563 | if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state)) |
2ebd4428 | 564 | reset_type = ICE_RESET_GLOBR; |
7e408e07 | 565 | if (test_and_clear_bit(ICE_EMPR_RECV, pf->state)) |
03af8406 | 566 | reset_type = ICE_RESET_EMPR; |
2ebd4428 DE |
567 | /* return if no valid reset type requested */ |
568 | if (reset_type == ICE_RESET_INVAL) | |
569 | return; | |
5abac9d7 | 570 | ice_prepare_for_reset(pf); |
0b28b702 AV |
571 | |
572 | /* make sure we are ready to rebuild */ | |
fd2a9817 | 573 | if (ice_check_reset(&pf->hw)) { |
7e408e07 | 574 | set_bit(ICE_RESET_FAILED, pf->state); |
fd2a9817 AV |
575 | } else { |
576 | /* done with reset. start rebuild */ | |
577 | pf->hw.reset_ongoing = false; | |
462acf6a | 578 | ice_rebuild(pf, reset_type); |
0f9d5027 | 579 | /* clear bit to resume normal operations, but |
94c4441b | 580 | * ICE_NEEDS_RESTART bit is set in case rebuild failed |
0f9d5027 | 581 | */ |
7e408e07 AV |
582 | clear_bit(ICE_RESET_OICR_RECV, pf->state); |
583 | clear_bit(ICE_PREPARED_FOR_RESET, pf->state); | |
584 | clear_bit(ICE_PFR_REQ, pf->state); | |
585 | clear_bit(ICE_CORER_REQ, pf->state); | |
586 | clear_bit(ICE_GLOBR_REQ, pf->state); | |
1c08052e | 587 | wake_up(&pf->reset_wait_queue); |
1c44e3bc | 588 | ice_reset_all_vfs(pf, true); |
fd2a9817 | 589 | } |
0f9d5027 AV |
590 | |
591 | return; | |
0b28b702 AV |
592 | } |
593 | ||
594 | /* No pending resets to finish processing. Check for new resets */ | |
7e408e07 | 595 | if (test_bit(ICE_PFR_REQ, pf->state)) |
0f9d5027 | 596 | reset_type = ICE_RESET_PFR; |
7e408e07 | 597 | if (test_bit(ICE_CORER_REQ, pf->state)) |
0f9d5027 | 598 | reset_type = ICE_RESET_CORER; |
7e408e07 | 599 | if (test_bit(ICE_GLOBR_REQ, pf->state)) |
0b28b702 | 600 | reset_type = ICE_RESET_GLOBR; |
0f9d5027 AV |
601 | /* If no valid reset type requested just return */ |
602 | if (reset_type == ICE_RESET_INVAL) | |
603 | return; | |
0b28b702 | 604 | |
0f9d5027 | 605 | /* reset if not already down or busy */ |
7e408e07 AV |
606 | if (!test_bit(ICE_DOWN, pf->state) && |
607 | !test_bit(ICE_CFG_BUSY, pf->state)) { | |
0b28b702 AV |
608 | ice_do_reset(pf, reset_type); |
609 | } | |
0b28b702 AV |
610 | } |
611 | ||
2e0ab37c JB |
612 | /** |
613 | * ice_print_topo_conflict - print topology conflict message | |
614 | * @vsi: the VSI whose topology status is being checked | |
615 | */ | |
616 | static void ice_print_topo_conflict(struct ice_vsi *vsi) | |
617 | { | |
618 | switch (vsi->port_info->phy.link_info.topo_media_conflict) { | |
619 | case ICE_AQ_LINK_TOPO_CONFLICT: | |
620 | case ICE_AQ_LINK_MEDIA_CONFLICT: | |
5878589d PG |
621 | case ICE_AQ_LINK_TOPO_UNREACH_PRT: |
622 | case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT: | |
623 | case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA: | |
5c57145a | 624 | netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); |
2e0ab37c | 625 | break; |
5878589d | 626 | case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: |
4fc5fbee AV |
627 | if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) |
628 | netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n"); | |
629 | else | |
630 | netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); | |
5878589d | 631 | break; |
2e0ab37c JB |
632 | default: |
633 | break; | |
634 | } | |
635 | } | |
636 | ||
cdedef59 AV |
637 | /** |
638 | * ice_print_link_msg - print link up or down message | |
639 | * @vsi: the VSI whose link status is being queried | |
640 | * @isup: boolean for if the link is now up or down | |
641 | */ | |
fcea6f3d | 642 | void ice_print_link_msg(struct ice_vsi *vsi, bool isup) |
cdedef59 | 643 | { |
f776b3ac | 644 | struct ice_aqc_get_phy_caps_data *caps; |
5ee30564 | 645 | const char *an_advertised; |
f776b3ac PG |
646 | enum ice_status status; |
647 | const char *fec_req; | |
cdedef59 | 648 | const char *speed; |
f776b3ac | 649 | const char *fec; |
cdedef59 | 650 | const char *fc; |
43260988 | 651 | const char *an; |
cdedef59 | 652 | |
c2a23e00 BC |
653 | if (!vsi) |
654 | return; | |
655 | ||
cdedef59 AV |
656 | if (vsi->current_isup == isup) |
657 | return; | |
658 | ||
659 | vsi->current_isup = isup; | |
660 | ||
661 | if (!isup) { | |
662 | netdev_info(vsi->netdev, "NIC Link is Down\n"); | |
663 | return; | |
664 | } | |
665 | ||
666 | switch (vsi->port_info->phy.link_info.link_speed) { | |
072efdf8 AV |
667 | case ICE_AQ_LINK_SPEED_100GB: |
668 | speed = "100 G"; | |
669 | break; | |
670 | case ICE_AQ_LINK_SPEED_50GB: | |
671 | speed = "50 G"; | |
672 | break; | |
cdedef59 AV |
673 | case ICE_AQ_LINK_SPEED_40GB: |
674 | speed = "40 G"; | |
675 | break; | |
676 | case ICE_AQ_LINK_SPEED_25GB: | |
677 | speed = "25 G"; | |
678 | break; | |
679 | case ICE_AQ_LINK_SPEED_20GB: | |
680 | speed = "20 G"; | |
681 | break; | |
682 | case ICE_AQ_LINK_SPEED_10GB: | |
683 | speed = "10 G"; | |
684 | break; | |
685 | case ICE_AQ_LINK_SPEED_5GB: | |
686 | speed = "5 G"; | |
687 | break; | |
688 | case ICE_AQ_LINK_SPEED_2500MB: | |
689 | speed = "2.5 G"; | |
690 | break; | |
691 | case ICE_AQ_LINK_SPEED_1000MB: | |
692 | speed = "1 G"; | |
693 | break; | |
694 | case ICE_AQ_LINK_SPEED_100MB: | |
695 | speed = "100 M"; | |
696 | break; | |
697 | default: | |
5b13886d | 698 | speed = "Unknown "; |
cdedef59 AV |
699 | break; |
700 | } | |
701 | ||
702 | switch (vsi->port_info->fc.current_mode) { | |
703 | case ICE_FC_FULL: | |
2f2da36e | 704 | fc = "Rx/Tx"; |
cdedef59 AV |
705 | break; |
706 | case ICE_FC_TX_PAUSE: | |
2f2da36e | 707 | fc = "Tx"; |
cdedef59 AV |
708 | break; |
709 | case ICE_FC_RX_PAUSE: | |
2f2da36e | 710 | fc = "Rx"; |
cdedef59 | 711 | break; |
203a068a BC |
712 | case ICE_FC_NONE: |
713 | fc = "None"; | |
714 | break; | |
cdedef59 AV |
715 | default: |
716 | fc = "Unknown"; | |
717 | break; | |
718 | } | |
719 | ||
f776b3ac PG |
720 | /* Get FEC mode based on negotiated link info */ |
721 | switch (vsi->port_info->phy.link_info.fec_info) { | |
722 | case ICE_AQ_LINK_25G_RS_528_FEC_EN: | |
f776b3ac PG |
723 | case ICE_AQ_LINK_25G_RS_544_FEC_EN: |
724 | fec = "RS-FEC"; | |
725 | break; | |
726 | case ICE_AQ_LINK_25G_KR_FEC_EN: | |
727 | fec = "FC-FEC/BASE-R"; | |
728 | break; | |
729 | default: | |
730 | fec = "NONE"; | |
731 | break; | |
732 | } | |
733 | ||
43260988 JB |
734 | /* check if autoneg completed, might be false due to not supported */ |
735 | if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) | |
736 | an = "True"; | |
737 | else | |
738 | an = "False"; | |
739 | ||
f776b3ac | 740 | /* Get FEC mode requested based on PHY caps last SW configuration */ |
9efe35d0 | 741 | caps = kzalloc(sizeof(*caps), GFP_KERNEL); |
f776b3ac PG |
742 | if (!caps) { |
743 | fec_req = "Unknown"; | |
5ee30564 | 744 | an_advertised = "Unknown"; |
f776b3ac PG |
745 | goto done; |
746 | } | |
747 | ||
748 | status = ice_aq_get_phy_caps(vsi->port_info, false, | |
d6730a87 | 749 | ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); |
f776b3ac PG |
750 | if (status) |
751 | netdev_info(vsi->netdev, "Get phy capability failed.\n"); | |
752 | ||
5ee30564 PG |
753 | an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off"; |
754 | ||
f776b3ac PG |
755 | if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || |
756 | caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) | |
757 | fec_req = "RS-FEC"; | |
758 | else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ || | |
759 | caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ) | |
760 | fec_req = "FC-FEC/BASE-R"; | |
761 | else | |
762 | fec_req = "NONE"; | |
763 | ||
9efe35d0 | 764 | kfree(caps); |
f776b3ac PG |
765 | |
766 | done: | |
5ee30564 PG |
767 | netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", |
768 | speed, fec_req, fec, an_advertised, an, fc); | |
2e0ab37c | 769 | ice_print_topo_conflict(vsi); |
cdedef59 AV |
770 | } |
771 | ||
0b28b702 | 772 | /** |
f9867df6 AV |
773 | * ice_vsi_link_event - update the VSI's netdev |
774 | * @vsi: the VSI on which the link event occurred | |
775 | * @link_up: whether or not the VSI needs to be set up or down | |
0b28b702 AV |
776 | */ |
777 | static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) | |
778 | { | |
c2a23e00 BC |
779 | if (!vsi) |
780 | return; | |
781 | ||
e97fb1ae | 782 | if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev) |
0b28b702 AV |
783 | return; |
784 | ||
785 | if (vsi->type == ICE_VSI_PF) { | |
c2a23e00 | 786 | if (link_up == netif_carrier_ok(vsi->netdev)) |
0b28b702 | 787 | return; |
c2a23e00 | 788 | |
0b28b702 AV |
789 | if (link_up) { |
790 | netif_carrier_on(vsi->netdev); | |
791 | netif_tx_wake_all_queues(vsi->netdev); | |
792 | } else { | |
793 | netif_carrier_off(vsi->netdev); | |
794 | netif_tx_stop_all_queues(vsi->netdev); | |
795 | } | |
796 | } | |
797 | } | |
798 | ||
7d9c9b79 DE |
799 | /** |
800 | * ice_set_dflt_mib - send a default config MIB to the FW | |
801 | * @pf: private PF struct | |
802 | * | |
803 | * This function sends a default configuration MIB to the FW. | |
804 | * | |
805 | * If this function errors out at any point, the driver is still able to | |
806 | * function. The main impact is that LFC may not operate as expected. | |
807 | * Therefore an error state in this function should be treated with a DBG | |
808 | * message and continue on with driver rebuild/reenable. | |
809 | */ | |
810 | static void ice_set_dflt_mib(struct ice_pf *pf) | |
811 | { | |
812 | struct device *dev = ice_pf_to_dev(pf); | |
813 | u8 mib_type, *buf, *lldpmib = NULL; | |
814 | u16 len, typelen, offset = 0; | |
815 | struct ice_lldp_org_tlv *tlv; | |
12aae8f1 | 816 | struct ice_hw *hw = &pf->hw; |
7d9c9b79 DE |
817 | u32 ouisubtype; |
818 | ||
7d9c9b79 DE |
819 | mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; |
820 | lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); | |
821 | if (!lldpmib) { | |
822 | dev_dbg(dev, "%s Failed to allocate MIB memory\n", | |
823 | __func__); | |
824 | return; | |
825 | } | |
826 | ||
827 | /* Add ETS CFG TLV */ | |
828 | tlv = (struct ice_lldp_org_tlv *)lldpmib; | |
829 | typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | | |
830 | ICE_IEEE_ETS_TLV_LEN); | |
831 | tlv->typelen = htons(typelen); | |
832 | ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | | |
833 | ICE_IEEE_SUBTYPE_ETS_CFG); | |
834 | tlv->ouisubtype = htonl(ouisubtype); | |
835 | ||
836 | buf = tlv->tlvinfo; | |
837 | buf[0] = 0; | |
838 | ||
839 | /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. | |
840 | * Octets 5 - 12 are BW values, set octet 5 to 100% BW. | |
841 | * Octets 13 - 20 are TSA values - leave as zeros | |
842 | */ | |
843 | buf[5] = 0x64; | |
844 | len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; | |
845 | offset += len + 2; | |
846 | tlv = (struct ice_lldp_org_tlv *) | |
847 | ((char *)tlv + sizeof(tlv->typelen) + len); | |
848 | ||
849 | /* Add ETS REC TLV */ | |
850 | buf = tlv->tlvinfo; | |
851 | tlv->typelen = htons(typelen); | |
852 | ||
853 | ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | | |
854 | ICE_IEEE_SUBTYPE_ETS_REC); | |
855 | tlv->ouisubtype = htonl(ouisubtype); | |
856 | ||
857 | /* First octet of buf is reserved | |
858 | * Octets 1 - 4 map UP to TC - all UPs map to zero | |
859 | * Octets 5 - 12 are BW values - set TC 0 to 100%. | |
860 | * Octets 13 - 20 are TSA value - leave as zeros | |
861 | */ | |
862 | buf[5] = 0x64; | |
863 | offset += len + 2; | |
864 | tlv = (struct ice_lldp_org_tlv *) | |
865 | ((char *)tlv + sizeof(tlv->typelen) + len); | |
866 | ||
867 | /* Add PFC CFG TLV */ | |
868 | typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | | |
869 | ICE_IEEE_PFC_TLV_LEN); | |
870 | tlv->typelen = htons(typelen); | |
871 | ||
872 | ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | | |
873 | ICE_IEEE_SUBTYPE_PFC_CFG); | |
874 | tlv->ouisubtype = htonl(ouisubtype); | |
875 | ||
876 | /* Octet 1 left as all zeros - PFC disabled */ | |
877 | buf[0] = 0x08; | |
878 | len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; | |
879 | offset += len + 2; | |
880 | ||
881 | if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) | |
882 | dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); | |
883 | ||
884 | kfree(lldpmib); | |
885 | } | |
886 | ||
c77849f5 AV |
887 | /** |
888 | * ice_check_module_power | |
889 | * @pf: pointer to PF struct | |
890 | * @link_cfg_err: bitmap from the link info structure | |
891 | * | |
892 | * check module power level returned by a previous call to aq_get_link_info | |
893 | * and print error messages if module power level is not supported | |
894 | */ | |
895 | static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) | |
896 | { | |
897 | /* if module power level is supported, clear the flag */ | |
898 | if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT | | |
899 | ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) { | |
900 | clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); | |
901 | return; | |
902 | } | |
903 | ||
904 | /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the | |
905 | * above block didn't clear this bit, there's nothing to do | |
906 | */ | |
907 | if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) | |
908 | return; | |
909 | ||
910 | if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) { | |
911 | dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n"); | |
912 | set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); | |
913 | } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) { | |
914 | dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n"); | |
915 | set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); | |
916 | } | |
917 | } | |
918 | ||
0b28b702 AV |
919 | /** |
920 | * ice_link_event - process the link event | |
2f2da36e | 921 | * @pf: PF that the link event is associated with |
0b28b702 | 922 | * @pi: port_info for the port that the link event is associated with |
c2a23e00 BC |
923 | * @link_up: true if the physical link is up and false if it is down |
924 | * @link_speed: current link speed received from the link event | |
0b28b702 | 925 | * |
c2a23e00 | 926 | * Returns 0 on success and negative on failure |
0b28b702 AV |
927 | */ |
928 | static int | |
c2a23e00 BC |
929 | ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, |
930 | u16 link_speed) | |
0b28b702 | 931 | { |
4015d11e | 932 | struct device *dev = ice_pf_to_dev(pf); |
0b28b702 | 933 | struct ice_phy_info *phy_info; |
d348d517 | 934 | enum ice_status status; |
c2a23e00 BC |
935 | struct ice_vsi *vsi; |
936 | u16 old_link_speed; | |
937 | bool old_link; | |
0b28b702 AV |
938 | |
939 | phy_info = &pi->phy; | |
940 | phy_info->link_info_old = phy_info->link_info; | |
0b28b702 | 941 | |
c2a23e00 | 942 | old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP); |
0b28b702 AV |
943 | old_link_speed = phy_info->link_info_old.link_speed; |
944 | ||
c2a23e00 BC |
945 | /* update the link info structures and re-enable link events, |
946 | * don't bail on failure due to other book keeping needed | |
947 | */ | |
d348d517 AV |
948 | status = ice_update_link_info(pi); |
949 | if (status) | |
950 | dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n", | |
951 | pi->lport, ice_stat_str(status), | |
952 | ice_aq_str(pi->hw->adminq.sq_last_status)); | |
0b28b702 | 953 | |
c77849f5 AV |
954 | ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); |
955 | ||
0ce6c34a DE |
956 | /* Check if the link state is up after updating link info, and treat |
957 | * this event as an UP event since the link is actually UP now. | |
958 | */ | |
959 | if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) | |
960 | link_up = true; | |
961 | ||
208ff751 | 962 | vsi = ice_get_main_vsi(pf); |
c2a23e00 BC |
963 | if (!vsi || !vsi->port_info) |
964 | return -EINVAL; | |
0b28b702 | 965 | |
6d599946 TN |
966 | /* turn off PHY if media was removed */ |
967 | if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) && | |
968 | !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { | |
969 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); | |
d348d517 | 970 | ice_set_link(vsi, false); |
6d599946 TN |
971 | } |
972 | ||
1a3571b5 PG |
973 | /* if the old link up/down and speed is the same as the new */ |
974 | if (link_up == old_link && link_speed == old_link_speed) | |
d348d517 | 975 | return 0; |
1a3571b5 | 976 | |
7d9c9b79 DE |
977 | if (ice_is_dcb_active(pf)) { |
978 | if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) | |
979 | ice_dcb_rebuild(pf); | |
980 | } else { | |
981 | if (link_up) | |
982 | ice_set_dflt_mib(pf); | |
983 | } | |
c2a23e00 BC |
984 | ice_vsi_link_event(vsi, link_up); |
985 | ice_print_link_msg(vsi, link_up); | |
0b28b702 | 986 | |
26a91525 | 987 | ice_vc_notify_link_state(pf); |
53b8decb | 988 | |
d348d517 | 989 | return 0; |
0b28b702 AV |
990 | } |
991 | ||
992 | /** | |
4f4be03b AV |
993 | * ice_watchdog_subtask - periodic tasks not using event driven scheduling |
994 | * @pf: board private structure | |
0b28b702 | 995 | */ |
4f4be03b | 996 | static void ice_watchdog_subtask(struct ice_pf *pf) |
0b28b702 | 997 | { |
4f4be03b | 998 | int i; |
0b28b702 | 999 | |
4f4be03b | 1000 | /* if interface is down do nothing */ |
7e408e07 AV |
1001 | if (test_bit(ICE_DOWN, pf->state) || |
1002 | test_bit(ICE_CFG_BUSY, pf->state)) | |
4f4be03b | 1003 | return; |
0b28b702 | 1004 | |
4f4be03b AV |
1005 | /* make sure we don't do these things too often */ |
1006 | if (time_before(jiffies, | |
1007 | pf->serv_tmr_prev + pf->serv_tmr_period)) | |
1008 | return; | |
0b28b702 | 1009 | |
4f4be03b AV |
1010 | pf->serv_tmr_prev = jiffies; |
1011 | ||
4f4be03b AV |
1012 | /* Update the stats for active netdevs so the network stack |
1013 | * can look at updated numbers whenever it cares to | |
1014 | */ | |
1015 | ice_update_pf_stats(pf); | |
80ed404a | 1016 | ice_for_each_vsi(pf, i) |
4f4be03b AV |
1017 | if (pf->vsi[i] && pf->vsi[i]->netdev) |
1018 | ice_update_vsi_stats(pf->vsi[i]); | |
0b28b702 AV |
1019 | } |
1020 | ||
250c3b3e BC |
1021 | /** |
1022 | * ice_init_link_events - enable/initialize link events | |
1023 | * @pi: pointer to the port_info instance | |
1024 | * | |
1025 | * Returns -EIO on failure, 0 on success | |
1026 | */ | |
1027 | static int ice_init_link_events(struct ice_port_info *pi) | |
1028 | { | |
1029 | u16 mask; | |
1030 | ||
1031 | mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | | |
1032 | ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL)); | |
1033 | ||
1034 | if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) { | |
19cce2c6 | 1035 | dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n", |
250c3b3e BC |
1036 | pi->lport); |
1037 | return -EIO; | |
1038 | } | |
1039 | ||
1040 | if (ice_aq_get_link_info(pi, true, NULL, NULL)) { | |
19cce2c6 | 1041 | dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n", |
250c3b3e BC |
1042 | pi->lport); |
1043 | return -EIO; | |
1044 | } | |
1045 | ||
1046 | return 0; | |
1047 | } | |
1048 | ||
1049 | /** | |
1050 | * ice_handle_link_event - handle link event via ARQ | |
2f2da36e | 1051 | * @pf: PF that the link event is associated with |
c2a23e00 | 1052 | * @event: event structure containing link status info |
250c3b3e | 1053 | */ |
c2a23e00 BC |
1054 | static int |
1055 | ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) | |
250c3b3e | 1056 | { |
c2a23e00 | 1057 | struct ice_aqc_get_link_status_data *link_data; |
250c3b3e BC |
1058 | struct ice_port_info *port_info; |
1059 | int status; | |
1060 | ||
c2a23e00 | 1061 | link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf; |
250c3b3e BC |
1062 | port_info = pf->hw.port_info; |
1063 | if (!port_info) | |
1064 | return -EINVAL; | |
1065 | ||
c2a23e00 BC |
1066 | status = ice_link_event(pf, port_info, |
1067 | !!(link_data->link_info & ICE_AQ_LINK_UP), | |
1068 | le16_to_cpu(link_data->link_speed)); | |
250c3b3e | 1069 | if (status) |
19cce2c6 AV |
1070 | dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", |
1071 | status); | |
250c3b3e BC |
1072 | |
1073 | return status; | |
1074 | } | |
1075 | ||
d69ea414 JK |
1076 | enum ice_aq_task_state { |
1077 | ICE_AQ_TASK_WAITING = 0, | |
1078 | ICE_AQ_TASK_COMPLETE, | |
1079 | ICE_AQ_TASK_CANCELED, | |
1080 | }; | |
1081 | ||
1082 | struct ice_aq_task { | |
1083 | struct hlist_node entry; | |
1084 | ||
1085 | u16 opcode; | |
1086 | struct ice_rq_event_info *event; | |
1087 | enum ice_aq_task_state state; | |
1088 | }; | |
1089 | ||
1090 | /** | |
ef860480 | 1091 | * ice_aq_wait_for_event - Wait for an AdminQ event from firmware |
d69ea414 JK |
1092 | * @pf: pointer to the PF private structure |
1093 | * @opcode: the opcode to wait for | |
1094 | * @timeout: how long to wait, in jiffies | |
1095 | * @event: storage for the event info | |
1096 | * | |
1097 | * Waits for a specific AdminQ completion event on the ARQ for a given PF. The | |
1098 | * current thread will be put to sleep until the specified event occurs or | |
1099 | * until the given timeout is reached. | |
1100 | * | |
1101 | * To obtain only the descriptor contents, pass an event without an allocated | |
1102 | * msg_buf. If the complete data buffer is desired, allocate the | |
1103 | * event->msg_buf with enough space ahead of time. | |
1104 | * | |
1105 | * Returns: zero on success, or a negative error code on failure. | |
1106 | */ | |
1107 | int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, | |
1108 | struct ice_rq_event_info *event) | |
1109 | { | |
1e8249cc | 1110 | struct device *dev = ice_pf_to_dev(pf); |
d69ea414 | 1111 | struct ice_aq_task *task; |
1e8249cc | 1112 | unsigned long start; |
d69ea414 JK |
1113 | long ret; |
1114 | int err; | |
1115 | ||
1116 | task = kzalloc(sizeof(*task), GFP_KERNEL); | |
1117 | if (!task) | |
1118 | return -ENOMEM; | |
1119 | ||
1120 | INIT_HLIST_NODE(&task->entry); | |
1121 | task->opcode = opcode; | |
1122 | task->event = event; | |
1123 | task->state = ICE_AQ_TASK_WAITING; | |
1124 | ||
1125 | spin_lock_bh(&pf->aq_wait_lock); | |
1126 | hlist_add_head(&task->entry, &pf->aq_wait_list); | |
1127 | spin_unlock_bh(&pf->aq_wait_lock); | |
1128 | ||
1e8249cc JK |
1129 | start = jiffies; |
1130 | ||
d69ea414 JK |
1131 | ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, |
1132 | timeout); | |
1133 | switch (task->state) { | |
1134 | case ICE_AQ_TASK_WAITING: | |
1135 | err = ret < 0 ? ret : -ETIMEDOUT; | |
1136 | break; | |
1137 | case ICE_AQ_TASK_CANCELED: | |
1138 | err = ret < 0 ? ret : -ECANCELED; | |
1139 | break; | |
1140 | case ICE_AQ_TASK_COMPLETE: | |
1141 | err = ret < 0 ? ret : 0; | |
1142 | break; | |
1143 | default: | |
1144 | WARN(1, "Unexpected AdminQ wait task state %u", task->state); | |
1145 | err = -EINVAL; | |
1146 | break; | |
1147 | } | |
1148 | ||
1e8249cc JK |
1149 | dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n", |
1150 | jiffies_to_msecs(jiffies - start), | |
1151 | jiffies_to_msecs(timeout), | |
1152 | opcode); | |
1153 | ||
d69ea414 JK |
1154 | spin_lock_bh(&pf->aq_wait_lock); |
1155 | hlist_del(&task->entry); | |
1156 | spin_unlock_bh(&pf->aq_wait_lock); | |
1157 | kfree(task); | |
1158 | ||
1159 | return err; | |
1160 | } | |
1161 | ||
1162 | /** | |
1163 | * ice_aq_check_events - Check if any thread is waiting for an AdminQ event | |
1164 | * @pf: pointer to the PF private structure | |
1165 | * @opcode: the opcode of the event | |
1166 | * @event: the event to check | |
1167 | * | |
1168 | * Loops over the current list of pending threads waiting for an AdminQ event. | |
1169 | * For each matching task, copy the contents of the event into the task | |
1170 | * structure and wake up the thread. | |
1171 | * | |
1172 | * If multiple threads wait for the same opcode, they will all be woken up. | |
1173 | * | |
1174 | * Note that event->msg_buf will only be duplicated if the event has a buffer | |
1175 | * with enough space already allocated. Otherwise, only the descriptor and | |
1176 | * message length will be copied. | |
1177 | * | |
1178 | * Returns: true if an event was found, false otherwise | |
1179 | */ | |
1180 | static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, | |
1181 | struct ice_rq_event_info *event) | |
1182 | { | |
1183 | struct ice_aq_task *task; | |
1184 | bool found = false; | |
1185 | ||
1186 | spin_lock_bh(&pf->aq_wait_lock); | |
1187 | hlist_for_each_entry(task, &pf->aq_wait_list, entry) { | |
1188 | if (task->state || task->opcode != opcode) | |
1189 | continue; | |
1190 | ||
1191 | memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); | |
1192 | task->event->msg_len = event->msg_len; | |
1193 | ||
1194 | /* Only copy the data buffer if a destination was set */ | |
1195 | if (task->event->msg_buf && | |
1196 | task->event->buf_len > event->buf_len) { | |
1197 | memcpy(task->event->msg_buf, event->msg_buf, | |
1198 | event->buf_len); | |
1199 | task->event->buf_len = event->buf_len; | |
1200 | } | |
1201 | ||
1202 | task->state = ICE_AQ_TASK_COMPLETE; | |
1203 | found = true; | |
1204 | } | |
1205 | spin_unlock_bh(&pf->aq_wait_lock); | |
1206 | ||
1207 | if (found) | |
1208 | wake_up(&pf->aq_wait_queue); | |
1209 | } | |
1210 | ||
1211 | /** | |
1212 | * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks | |
1213 | * @pf: the PF private structure | |
1214 | * | |
1215 | * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. | |
1216 | * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. | |
1217 | */ | |
1218 | static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) | |
1219 | { | |
1220 | struct ice_aq_task *task; | |
1221 | ||
1222 | spin_lock_bh(&pf->aq_wait_lock); | |
1223 | hlist_for_each_entry(task, &pf->aq_wait_list, entry) | |
1224 | task->state = ICE_AQ_TASK_CANCELED; | |
1225 | spin_unlock_bh(&pf->aq_wait_lock); | |
1226 | ||
1227 | wake_up(&pf->aq_wait_queue); | |
1228 | } | |
1229 | ||
940b61af AV |
1230 | /** |
1231 | * __ice_clean_ctrlq - helper function to clean controlq rings | |
1232 | * @pf: ptr to struct ice_pf | |
1233 | * @q_type: specific Control queue type | |
1234 | */ | |
1235 | static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) | |
1236 | { | |
4015d11e | 1237 | struct device *dev = ice_pf_to_dev(pf); |
940b61af AV |
1238 | struct ice_rq_event_info event; |
1239 | struct ice_hw *hw = &pf->hw; | |
1240 | struct ice_ctl_q_info *cq; | |
1241 | u16 pending, i = 0; | |
1242 | const char *qtype; | |
1243 | u32 oldval, val; | |
1244 | ||
0b28b702 | 1245 | /* Do not clean control queue if/when PF reset fails */ |
7e408e07 | 1246 | if (test_bit(ICE_RESET_FAILED, pf->state)) |
0b28b702 AV |
1247 | return 0; |
1248 | ||
940b61af AV |
1249 | switch (q_type) { |
1250 | case ICE_CTL_Q_ADMIN: | |
1251 | cq = &hw->adminq; | |
1252 | qtype = "Admin"; | |
1253 | break; | |
8f5ee3c4 JK |
1254 | case ICE_CTL_Q_SB: |
1255 | cq = &hw->sbq; | |
1256 | qtype = "Sideband"; | |
1257 | break; | |
75d2b253 AV |
1258 | case ICE_CTL_Q_MAILBOX: |
1259 | cq = &hw->mailboxq; | |
1260 | qtype = "Mailbox"; | |
0891c896 VS |
1261 | /* we are going to try to detect a malicious VF, so set the |
1262 | * state to begin detection | |
1263 | */ | |
1264 | hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; | |
75d2b253 | 1265 | break; |
940b61af | 1266 | default: |
4015d11e | 1267 | dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); |
940b61af AV |
1268 | return 0; |
1269 | } | |
1270 | ||
1271 | /* check for error indications - PF_xx_AxQLEN register layout for | |
1272 | * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN. | |
1273 | */ | |
1274 | val = rd32(hw, cq->rq.len); | |
1275 | if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | | |
1276 | PF_FW_ARQLEN_ARQCRIT_M)) { | |
1277 | oldval = val; | |
1278 | if (val & PF_FW_ARQLEN_ARQVFE_M) | |
4015d11e BC |
1279 | dev_dbg(dev, "%s Receive Queue VF Error detected\n", |
1280 | qtype); | |
940b61af | 1281 | if (val & PF_FW_ARQLEN_ARQOVFL_M) { |
19cce2c6 | 1282 | dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", |
940b61af AV |
1283 | qtype); |
1284 | } | |
1285 | if (val & PF_FW_ARQLEN_ARQCRIT_M) | |
19cce2c6 | 1286 | dev_dbg(dev, "%s Receive Queue Critical Error detected\n", |
940b61af AV |
1287 | qtype); |
1288 | val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | | |
1289 | PF_FW_ARQLEN_ARQCRIT_M); | |
1290 | if (oldval != val) | |
1291 | wr32(hw, cq->rq.len, val); | |
1292 | } | |
1293 | ||
1294 | val = rd32(hw, cq->sq.len); | |
1295 | if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | | |
1296 | PF_FW_ATQLEN_ATQCRIT_M)) { | |
1297 | oldval = val; | |
1298 | if (val & PF_FW_ATQLEN_ATQVFE_M) | |
19cce2c6 AV |
1299 | dev_dbg(dev, "%s Send Queue VF Error detected\n", |
1300 | qtype); | |
940b61af | 1301 | if (val & PF_FW_ATQLEN_ATQOVFL_M) { |
4015d11e | 1302 | dev_dbg(dev, "%s Send Queue Overflow Error detected\n", |
940b61af AV |
1303 | qtype); |
1304 | } | |
1305 | if (val & PF_FW_ATQLEN_ATQCRIT_M) | |
4015d11e | 1306 | dev_dbg(dev, "%s Send Queue Critical Error detected\n", |
940b61af AV |
1307 | qtype); |
1308 | val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | | |
1309 | PF_FW_ATQLEN_ATQCRIT_M); | |
1310 | if (oldval != val) | |
1311 | wr32(hw, cq->sq.len, val); | |
1312 | } | |
1313 | ||
1314 | event.buf_len = cq->rq_buf_size; | |
9efe35d0 | 1315 | event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); |
940b61af AV |
1316 | if (!event.msg_buf) |
1317 | return 0; | |
1318 | ||
1319 | do { | |
1320 | enum ice_status ret; | |
0b28b702 | 1321 | u16 opcode; |
940b61af AV |
1322 | |
1323 | ret = ice_clean_rq_elem(hw, cq, &event, &pending); | |
1324 | if (ret == ICE_ERR_AQ_NO_WORK) | |
1325 | break; | |
1326 | if (ret) { | |
0fee3577 LY |
1327 | dev_err(dev, "%s Receive Queue event error %s\n", qtype, |
1328 | ice_stat_str(ret)); | |
940b61af AV |
1329 | break; |
1330 | } | |
0b28b702 AV |
1331 | |
1332 | opcode = le16_to_cpu(event.desc.opcode); | |
1333 | ||
d69ea414 JK |
1334 | /* Notify any thread that might be waiting for this event */ |
1335 | ice_aq_check_events(pf, opcode, &event); | |
1336 | ||
0b28b702 | 1337 | switch (opcode) { |
250c3b3e | 1338 | case ice_aqc_opc_get_link_status: |
c2a23e00 | 1339 | if (ice_handle_link_event(pf, &event)) |
4015d11e | 1340 | dev_err(dev, "Could not handle link event\n"); |
250c3b3e | 1341 | break; |
2309ae38 BC |
1342 | case ice_aqc_opc_event_lan_overflow: |
1343 | ice_vf_lan_overflow_event(pf, &event); | |
1344 | break; | |
1071a835 | 1345 | case ice_mbx_opc_send_msg_to_pf: |
0891c896 VS |
1346 | if (!ice_is_malicious_vf(pf, &event, i, pending)) |
1347 | ice_vc_process_vf_msg(pf, &event); | |
1071a835 | 1348 | break; |
8b97ceb1 HT |
1349 | case ice_aqc_opc_fw_logging: |
1350 | ice_output_fw_log(hw, &event.desc, event.msg_buf); | |
1351 | break; | |
00cc3f1b AV |
1352 | case ice_aqc_opc_lldp_set_mib_change: |
1353 | ice_dcb_process_lldp_set_mib_change(pf, &event); | |
1354 | break; | |
0b28b702 | 1355 | default: |
19cce2c6 | 1356 | dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", |
0b28b702 AV |
1357 | qtype, opcode); |
1358 | break; | |
1359 | } | |
940b61af AV |
1360 | } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); |
1361 | ||
9efe35d0 | 1362 | kfree(event.msg_buf); |
940b61af AV |
1363 | |
1364 | return pending && (i == ICE_DFLT_IRQ_WORK); | |
1365 | } | |
1366 | ||
3d6b640e AV |
1367 | /** |
1368 | * ice_ctrlq_pending - check if there is a difference between ntc and ntu | |
1369 | * @hw: pointer to hardware info | |
1370 | * @cq: control queue information | |
1371 | * | |
1372 | * returns true if there are pending messages in a queue, false if there aren't | |
1373 | */ | |
1374 | static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq) | |
1375 | { | |
1376 | u16 ntu; | |
1377 | ||
1378 | ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); | |
1379 | return cq->rq.next_to_clean != ntu; | |
1380 | } | |
1381 | ||
940b61af AV |
1382 | /** |
1383 | * ice_clean_adminq_subtask - clean the AdminQ rings | |
1384 | * @pf: board private structure | |
1385 | */ | |
1386 | static void ice_clean_adminq_subtask(struct ice_pf *pf) | |
1387 | { | |
1388 | struct ice_hw *hw = &pf->hw; | |
940b61af | 1389 | |
7e408e07 | 1390 | if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) |
940b61af AV |
1391 | return; |
1392 | ||
1393 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN)) | |
1394 | return; | |
1395 | ||
7e408e07 | 1396 | clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); |
940b61af | 1397 | |
3d6b640e AV |
1398 | /* There might be a situation where new messages arrive to a control |
1399 | * queue between processing the last message and clearing the | |
1400 | * EVENT_PENDING bit. So before exiting, check queue head again (using | |
1401 | * ice_ctrlq_pending) and process new messages if any. | |
1402 | */ | |
1403 | if (ice_ctrlq_pending(hw, &hw->adminq)) | |
1404 | __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN); | |
940b61af AV |
1405 | |
1406 | ice_flush(hw); | |
1407 | } | |
1408 | ||
75d2b253 AV |
1409 | /** |
1410 | * ice_clean_mailboxq_subtask - clean the MailboxQ rings | |
1411 | * @pf: board private structure | |
1412 | */ | |
1413 | static void ice_clean_mailboxq_subtask(struct ice_pf *pf) | |
1414 | { | |
1415 | struct ice_hw *hw = &pf->hw; | |
1416 | ||
7e408e07 | 1417 | if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state)) |
75d2b253 AV |
1418 | return; |
1419 | ||
1420 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) | |
1421 | return; | |
1422 | ||
7e408e07 | 1423 | clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); |
75d2b253 AV |
1424 | |
1425 | if (ice_ctrlq_pending(hw, &hw->mailboxq)) | |
1426 | __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); | |
1427 | ||
1428 | ice_flush(hw); | |
1429 | } | |
1430 | ||
8f5ee3c4 JK |
1431 | /** |
1432 | * ice_clean_sbq_subtask - clean the Sideband Queue rings | |
1433 | * @pf: board private structure | |
1434 | */ | |
1435 | static void ice_clean_sbq_subtask(struct ice_pf *pf) | |
1436 | { | |
1437 | struct ice_hw *hw = &pf->hw; | |
1438 | ||
1439 | /* Nothing to do here if sideband queue is not supported */ | |
1440 | if (!ice_is_sbq_supported(hw)) { | |
1441 | clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); | |
1442 | return; | |
1443 | } | |
1444 | ||
1445 | if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) | |
1446 | return; | |
1447 | ||
1448 | if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) | |
1449 | return; | |
1450 | ||
1451 | clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); | |
1452 | ||
1453 | if (ice_ctrlq_pending(hw, &hw->sbq)) | |
1454 | __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); | |
1455 | ||
1456 | ice_flush(hw); | |
1457 | } | |
1458 | ||
940b61af AV |
1459 | /** |
1460 | * ice_service_task_schedule - schedule the service task to wake up | |
1461 | * @pf: board private structure | |
1462 | * | |
1463 | * If not already scheduled, this puts the task into the work queue. | |
1464 | */ | |
28bf2672 | 1465 | void ice_service_task_schedule(struct ice_pf *pf) |
940b61af | 1466 | { |
7e408e07 AV |
1467 | if (!test_bit(ICE_SERVICE_DIS, pf->state) && |
1468 | !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) && | |
1469 | !test_bit(ICE_NEEDS_RESTART, pf->state)) | |
940b61af AV |
1470 | queue_work(ice_wq, &pf->serv_task); |
1471 | } | |
1472 | ||
1473 | /** | |
1474 | * ice_service_task_complete - finish up the service task | |
1475 | * @pf: board private structure | |
1476 | */ | |
1477 | static void ice_service_task_complete(struct ice_pf *pf) | |
1478 | { | |
7e408e07 | 1479 | WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state)); |
940b61af AV |
1480 | |
1481 | /* force memory (pf->state) to sync before next service task */ | |
1482 | smp_mb__before_atomic(); | |
7e408e07 | 1483 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
940b61af AV |
1484 | } |
1485 | ||
8d81fa55 AA |
1486 | /** |
1487 | * ice_service_task_stop - stop service task and cancel works | |
1488 | * @pf: board private structure | |
769c500d | 1489 | * |
7e408e07 | 1490 | * Return 0 if the ICE_SERVICE_DIS bit was not already set, |
769c500d | 1491 | * 1 otherwise. |
8d81fa55 | 1492 | */ |
769c500d | 1493 | static int ice_service_task_stop(struct ice_pf *pf) |
8d81fa55 | 1494 | { |
769c500d AA |
1495 | int ret; |
1496 | ||
7e408e07 | 1497 | ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); |
8d81fa55 AA |
1498 | |
1499 | if (pf->serv_tmr.function) | |
1500 | del_timer_sync(&pf->serv_tmr); | |
1501 | if (pf->serv_task.func) | |
1502 | cancel_work_sync(&pf->serv_task); | |
1503 | ||
7e408e07 | 1504 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
769c500d | 1505 | return ret; |
8d81fa55 AA |
1506 | } |
1507 | ||
5995b6d0 BC |
1508 | /** |
1509 | * ice_service_task_restart - restart service task and schedule works | |
1510 | * @pf: board private structure | |
1511 | * | |
1512 | * This function is needed for suspend and resume works (e.g WoL scenario) | |
1513 | */ | |
1514 | static void ice_service_task_restart(struct ice_pf *pf) | |
1515 | { | |
7e408e07 | 1516 | clear_bit(ICE_SERVICE_DIS, pf->state); |
5995b6d0 BC |
1517 | ice_service_task_schedule(pf); |
1518 | } | |
1519 | ||
940b61af AV |
1520 | /** |
1521 | * ice_service_timer - timer callback to schedule service task | |
1522 | * @t: pointer to timer_list | |
1523 | */ | |
1524 | static void ice_service_timer(struct timer_list *t) | |
1525 | { | |
1526 | struct ice_pf *pf = from_timer(pf, t, serv_tmr); | |
1527 | ||
1528 | mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies)); | |
1529 | ice_service_task_schedule(pf); | |
1530 | } | |
1531 | ||
b3969fd7 SM |
1532 | /** |
1533 | * ice_handle_mdd_event - handle malicious driver detect event | |
1534 | * @pf: pointer to the PF structure | |
1535 | * | |
9d5c5a52 PG |
1536 | * Called from service task. OICR interrupt handler indicates MDD event. |
1537 | * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log | |
1538 | * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events | |
1539 | * disable the queue, the PF can be configured to reset the VF using ethtool | |
1540 | * private flag mdd-auto-reset-vf. | |
b3969fd7 SM |
1541 | */ |
1542 | static void ice_handle_mdd_event(struct ice_pf *pf) | |
1543 | { | |
4015d11e | 1544 | struct device *dev = ice_pf_to_dev(pf); |
b3969fd7 | 1545 | struct ice_hw *hw = &pf->hw; |
c1e08830 | 1546 | unsigned int i; |
b3969fd7 SM |
1547 | u32 reg; |
1548 | ||
7e408e07 | 1549 | if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) { |
9d5c5a52 PG |
1550 | /* Since the VF MDD event logging is rate limited, check if |
1551 | * there are pending MDD events. | |
1552 | */ | |
1553 | ice_print_vfs_mdd_events(pf); | |
b3969fd7 | 1554 | return; |
9d5c5a52 | 1555 | } |
b3969fd7 | 1556 | |
9d5c5a52 | 1557 | /* find what triggered an MDD event */ |
b3969fd7 SM |
1558 | reg = rd32(hw, GL_MDET_TX_PQM); |
1559 | if (reg & GL_MDET_TX_PQM_VALID_M) { | |
1560 | u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> | |
1561 | GL_MDET_TX_PQM_PF_NUM_S; | |
1562 | u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> | |
1563 | GL_MDET_TX_PQM_VF_NUM_S; | |
1564 | u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> | |
1565 | GL_MDET_TX_PQM_MAL_TYPE_S; | |
1566 | u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> | |
1567 | GL_MDET_TX_PQM_QNUM_S); | |
1568 | ||
1569 | if (netif_msg_tx_err(pf)) | |
4015d11e | 1570 | dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
b3969fd7 SM |
1571 | event, queue, pf_num, vf_num); |
1572 | wr32(hw, GL_MDET_TX_PQM, 0xffffffff); | |
b3969fd7 SM |
1573 | } |
1574 | ||
1575 | reg = rd32(hw, GL_MDET_TX_TCLAN); | |
1576 | if (reg & GL_MDET_TX_TCLAN_VALID_M) { | |
1577 | u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> | |
1578 | GL_MDET_TX_TCLAN_PF_NUM_S; | |
1579 | u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> | |
1580 | GL_MDET_TX_TCLAN_VF_NUM_S; | |
1581 | u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> | |
1582 | GL_MDET_TX_TCLAN_MAL_TYPE_S; | |
1583 | u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> | |
1584 | GL_MDET_TX_TCLAN_QNUM_S); | |
1585 | ||
1d8bd992 | 1586 | if (netif_msg_tx_err(pf)) |
4015d11e | 1587 | dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", |
b3969fd7 SM |
1588 | event, queue, pf_num, vf_num); |
1589 | wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); | |
b3969fd7 SM |
1590 | } |
1591 | ||
1592 | reg = rd32(hw, GL_MDET_RX); | |
1593 | if (reg & GL_MDET_RX_VALID_M) { | |
1594 | u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> | |
1595 | GL_MDET_RX_PF_NUM_S; | |
1596 | u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> | |
1597 | GL_MDET_RX_VF_NUM_S; | |
1598 | u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> | |
1599 | GL_MDET_RX_MAL_TYPE_S; | |
1600 | u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> | |
1601 | GL_MDET_RX_QNUM_S); | |
1602 | ||
1603 | if (netif_msg_rx_err(pf)) | |
4015d11e | 1604 | dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", |
b3969fd7 SM |
1605 | event, queue, pf_num, vf_num); |
1606 | wr32(hw, GL_MDET_RX, 0xffffffff); | |
b3969fd7 SM |
1607 | } |
1608 | ||
9d5c5a52 PG |
1609 | /* check to see if this PF caused an MDD event */ |
1610 | reg = rd32(hw, PF_MDET_TX_PQM); | |
1611 | if (reg & PF_MDET_TX_PQM_VALID_M) { | |
1612 | wr32(hw, PF_MDET_TX_PQM, 0xFFFF); | |
1613 | if (netif_msg_tx_err(pf)) | |
1614 | dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n"); | |
1615 | } | |
b3969fd7 | 1616 | |
9d5c5a52 PG |
1617 | reg = rd32(hw, PF_MDET_TX_TCLAN); |
1618 | if (reg & PF_MDET_TX_TCLAN_VALID_M) { | |
1619 | wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); | |
1620 | if (netif_msg_tx_err(pf)) | |
1621 | dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n"); | |
1622 | } | |
b3969fd7 | 1623 | |
9d5c5a52 PG |
1624 | reg = rd32(hw, PF_MDET_RX); |
1625 | if (reg & PF_MDET_RX_VALID_M) { | |
1626 | wr32(hw, PF_MDET_RX, 0xFFFF); | |
1627 | if (netif_msg_rx_err(pf)) | |
1628 | dev_info(dev, "Malicious Driver Detection event RX detected on PF\n"); | |
b3969fd7 SM |
1629 | } |
1630 | ||
9d5c5a52 PG |
1631 | /* Check to see if one of the VFs caused an MDD event, and then |
1632 | * increment counters and set print pending | |
1633 | */ | |
005881bc | 1634 | ice_for_each_vf(pf, i) { |
7c4bc1f5 AV |
1635 | struct ice_vf *vf = &pf->vf[i]; |
1636 | ||
1637 | reg = rd32(hw, VP_MDET_TX_PQM(i)); | |
1638 | if (reg & VP_MDET_TX_PQM_VALID_M) { | |
1639 | wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); | |
9d5c5a52 | 1640 | vf->mdd_tx_events.count++; |
7e408e07 | 1641 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1642 | if (netif_msg_tx_err(pf)) |
1643 | dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", | |
1644 | i); | |
7c4bc1f5 AV |
1645 | } |
1646 | ||
1647 | reg = rd32(hw, VP_MDET_TX_TCLAN(i)); | |
1648 | if (reg & VP_MDET_TX_TCLAN_VALID_M) { | |
1649 | wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); | |
9d5c5a52 | 1650 | vf->mdd_tx_events.count++; |
7e408e07 | 1651 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1652 | if (netif_msg_tx_err(pf)) |
1653 | dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", | |
1654 | i); | |
7c4bc1f5 AV |
1655 | } |
1656 | ||
1657 | reg = rd32(hw, VP_MDET_TX_TDPU(i)); | |
1658 | if (reg & VP_MDET_TX_TDPU_VALID_M) { | |
1659 | wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); | |
9d5c5a52 | 1660 | vf->mdd_tx_events.count++; |
7e408e07 | 1661 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1662 | if (netif_msg_tx_err(pf)) |
1663 | dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", | |
1664 | i); | |
7c4bc1f5 AV |
1665 | } |
1666 | ||
1667 | reg = rd32(hw, VP_MDET_RX(i)); | |
1668 | if (reg & VP_MDET_RX_VALID_M) { | |
1669 | wr32(hw, VP_MDET_RX(i), 0xFFFF); | |
9d5c5a52 | 1670 | vf->mdd_rx_events.count++; |
7e408e07 | 1671 | set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state); |
9d5c5a52 PG |
1672 | if (netif_msg_rx_err(pf)) |
1673 | dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", | |
1674 | i); | |
1675 | ||
1676 | /* Since the queue is disabled on VF Rx MDD events, the | |
1677 | * PF can be configured to reset the VF through ethtool | |
1678 | * private flag mdd-auto-reset-vf. | |
1679 | */ | |
7438a3b0 PG |
1680 | if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { |
1681 | /* VF MDD event counters will be cleared by | |
1682 | * reset, so print the event prior to reset. | |
1683 | */ | |
1684 | ice_print_vf_rx_mdd_event(vf); | |
9d5c5a52 | 1685 | ice_reset_vf(&pf->vf[i], false); |
7438a3b0 | 1686 | } |
7c4bc1f5 AV |
1687 | } |
1688 | } | |
9d5c5a52 PG |
1689 | |
1690 | ice_print_vfs_mdd_events(pf); | |
b3969fd7 SM |
1691 | } |
1692 | ||
6d599946 TN |
1693 | /** |
1694 | * ice_force_phys_link_state - Force the physical link state | |
1695 | * @vsi: VSI to force the physical link state to up/down | |
1696 | * @link_up: true/false indicates to set the physical link to up/down | |
1697 | * | |
1698 | * Force the physical link state by getting the current PHY capabilities from | |
1699 | * hardware and setting the PHY config based on the determined capabilities. If | |
1700 | * link changes a link event will be triggered because both the Enable Automatic | |
1701 | * Link Update and LESM Enable bits are set when setting the PHY capabilities. | |
1702 | * | |
1703 | * Returns 0 on success, negative on failure | |
1704 | */ | |
1705 | static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) | |
1706 | { | |
1707 | struct ice_aqc_get_phy_caps_data *pcaps; | |
1708 | struct ice_aqc_set_phy_cfg_data *cfg; | |
1709 | struct ice_port_info *pi; | |
1710 | struct device *dev; | |
1711 | int retcode; | |
1712 | ||
1713 | if (!vsi || !vsi->port_info || !vsi->back) | |
1714 | return -EINVAL; | |
1715 | if (vsi->type != ICE_VSI_PF) | |
1716 | return 0; | |
1717 | ||
9a946843 | 1718 | dev = ice_pf_to_dev(vsi->back); |
6d599946 TN |
1719 | |
1720 | pi = vsi->port_info; | |
1721 | ||
9efe35d0 | 1722 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
6d599946 TN |
1723 | if (!pcaps) |
1724 | return -ENOMEM; | |
1725 | ||
d6730a87 | 1726 | retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
6d599946 TN |
1727 | NULL); |
1728 | if (retcode) { | |
19cce2c6 | 1729 | dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", |
6d599946 TN |
1730 | vsi->vsi_num, retcode); |
1731 | retcode = -EIO; | |
1732 | goto out; | |
1733 | } | |
1734 | ||
1735 | /* No change in link */ | |
1736 | if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && | |
1737 | link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) | |
1738 | goto out; | |
1739 | ||
1a3571b5 PG |
1740 | /* Use the current user PHY configuration. The current user PHY |
1741 | * configuration is initialized during probe from PHY capabilities | |
1742 | * software mode, and updated on set PHY configuration. | |
1743 | */ | |
1744 | cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); | |
6d599946 TN |
1745 | if (!cfg) { |
1746 | retcode = -ENOMEM; | |
1747 | goto out; | |
1748 | } | |
1749 | ||
1a3571b5 | 1750 | cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; |
6d599946 TN |
1751 | if (link_up) |
1752 | cfg->caps |= ICE_AQ_PHY_ENA_LINK; | |
1753 | else | |
1754 | cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; | |
1755 | ||
1a3571b5 | 1756 | retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); |
6d599946 TN |
1757 | if (retcode) { |
1758 | dev_err(dev, "Failed to set phy config, VSI %d error %d\n", | |
1759 | vsi->vsi_num, retcode); | |
1760 | retcode = -EIO; | |
1761 | } | |
1762 | ||
9efe35d0 | 1763 | kfree(cfg); |
6d599946 | 1764 | out: |
9efe35d0 | 1765 | kfree(pcaps); |
6d599946 TN |
1766 | return retcode; |
1767 | } | |
1768 | ||
1769 | /** | |
1a3571b5 PG |
1770 | * ice_init_nvm_phy_type - Initialize the NVM PHY type |
1771 | * @pi: port info structure | |
1772 | * | |
ea78ce4d | 1773 | * Initialize nvm_phy_type_[low|high] for link lenient mode support |
1a3571b5 PG |
1774 | */ |
1775 | static int ice_init_nvm_phy_type(struct ice_port_info *pi) | |
1776 | { | |
1777 | struct ice_aqc_get_phy_caps_data *pcaps; | |
1778 | struct ice_pf *pf = pi->hw->back; | |
1779 | enum ice_status status; | |
1780 | int err = 0; | |
1781 | ||
1782 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); | |
1783 | if (!pcaps) | |
1784 | return -ENOMEM; | |
1785 | ||
d6730a87 | 1786 | status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, |
1a3571b5 PG |
1787 | NULL); |
1788 | ||
1789 | if (status) { | |
1790 | dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); | |
1791 | err = -EIO; | |
1792 | goto out; | |
1793 | } | |
1794 | ||
1795 | pf->nvm_phy_type_hi = pcaps->phy_type_high; | |
1796 | pf->nvm_phy_type_lo = pcaps->phy_type_low; | |
1797 | ||
1798 | out: | |
1799 | kfree(pcaps); | |
1800 | return err; | |
1801 | } | |
1802 | ||
ea78ce4d PG |
1803 | /** |
1804 | * ice_init_link_dflt_override - Initialize link default override | |
1805 | * @pi: port info structure | |
b4e813dd BA |
1806 | * |
1807 | * Initialize link default override and PHY total port shutdown during probe | |
ea78ce4d PG |
1808 | */ |
1809 | static void ice_init_link_dflt_override(struct ice_port_info *pi) | |
1810 | { | |
1811 | struct ice_link_default_override_tlv *ldo; | |
1812 | struct ice_pf *pf = pi->hw->back; | |
1813 | ||
1814 | ldo = &pf->link_dflt_override; | |
b4e813dd BA |
1815 | if (ice_get_link_default_override(ldo, pi)) |
1816 | return; | |
1817 | ||
1818 | if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) | |
1819 | return; | |
1820 | ||
1821 | /* Enable Total Port Shutdown (override/replace link-down-on-close | |
1822 | * ethtool private flag) for ports with Port Disable bit set. | |
1823 | */ | |
1824 | set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); | |
1825 | set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); | |
ea78ce4d PG |
1826 | } |
1827 | ||
1828 | /** | |
1829 | * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings | |
1830 | * @pi: port info structure | |
1831 | * | |
0a02944f | 1832 | * If default override is enabled, initialize the user PHY cfg speed and FEC |
ea78ce4d PG |
1833 | * settings using the default override mask from the NVM. |
1834 | * | |
1835 | * The PHY should only be configured with the default override settings the | |
7e408e07 | 1836 | * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state |
ea78ce4d PG |
1837 | * is used to indicate that the user PHY cfg default override is initialized |
1838 | * and the PHY has not been configured with the default override settings. The | |
1839 | * state is set here, and cleared in ice_configure_phy the first time the PHY is | |
1840 | * configured. | |
0a02944f AV |
1841 | * |
1842 | * This function should be called only if the FW doesn't support default | |
1843 | * configuration mode, as reported by ice_fw_supports_report_dflt_cfg. | |
ea78ce4d PG |
1844 | */ |
1845 | static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) | |
1846 | { | |
1847 | struct ice_link_default_override_tlv *ldo; | |
1848 | struct ice_aqc_set_phy_cfg_data *cfg; | |
1849 | struct ice_phy_info *phy = &pi->phy; | |
1850 | struct ice_pf *pf = pi->hw->back; | |
1851 | ||
1852 | ldo = &pf->link_dflt_override; | |
1853 | ||
1854 | /* If link default override is enabled, use to mask NVM PHY capabilities | |
1855 | * for speed and FEC default configuration. | |
1856 | */ | |
1857 | cfg = &phy->curr_user_phy_cfg; | |
1858 | ||
1859 | if (ldo->phy_type_low || ldo->phy_type_high) { | |
1860 | cfg->phy_type_low = pf->nvm_phy_type_lo & | |
1861 | cpu_to_le64(ldo->phy_type_low); | |
1862 | cfg->phy_type_high = pf->nvm_phy_type_hi & | |
1863 | cpu_to_le64(ldo->phy_type_high); | |
1864 | } | |
1865 | cfg->link_fec_opt = ldo->fec_options; | |
1866 | phy->curr_user_fec_req = ICE_FEC_AUTO; | |
1867 | ||
7e408e07 | 1868 | set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); |
ea78ce4d PG |
1869 | } |
1870 | ||
1a3571b5 PG |
1871 | /** |
1872 | * ice_init_phy_user_cfg - Initialize the PHY user configuration | |
1873 | * @pi: port info structure | |
1874 | * | |
1875 | * Initialize the current user PHY configuration, speed, FEC, and FC requested | |
1876 | * mode to default. The PHY defaults are from get PHY capabilities topology | |
1877 | * with media so call when media is first available. An error is returned if | |
1878 | * called when media is not available. The PHY initialization completed state is | |
1879 | * set here. | |
1880 | * | |
1881 | * These configurations are used when setting PHY | |
1882 | * configuration. The user PHY configuration is updated on set PHY | |
1883 | * configuration. Returns 0 on success, negative on failure | |
1884 | */ | |
1885 | static int ice_init_phy_user_cfg(struct ice_port_info *pi) | |
1886 | { | |
1887 | struct ice_aqc_get_phy_caps_data *pcaps; | |
1888 | struct ice_phy_info *phy = &pi->phy; | |
1889 | struct ice_pf *pf = pi->hw->back; | |
1890 | enum ice_status status; | |
1a3571b5 PG |
1891 | int err = 0; |
1892 | ||
1893 | if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) | |
1894 | return -EIO; | |
1895 | ||
1a3571b5 PG |
1896 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); |
1897 | if (!pcaps) | |
1898 | return -ENOMEM; | |
1899 | ||
0a02944f AV |
1900 | if (ice_fw_supports_report_dflt_cfg(pi->hw)) |
1901 | status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, | |
1902 | pcaps, NULL); | |
1903 | else | |
1904 | status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, | |
1905 | pcaps, NULL); | |
1a3571b5 PG |
1906 | if (status) { |
1907 | dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); | |
1908 | err = -EIO; | |
1909 | goto err_out; | |
1910 | } | |
1911 | ||
ea78ce4d PG |
1912 | ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); |
1913 | ||
1914 | /* check if lenient mode is supported and enabled */ | |
dc6aaa13 | 1915 | if (ice_fw_supports_link_override(pi->hw) && |
ea78ce4d PG |
1916 | !(pcaps->module_compliance_enforcement & |
1917 | ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { | |
1918 | set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); | |
1919 | ||
0a02944f AV |
1920 | /* if the FW supports default PHY configuration mode, then the driver |
1921 | * does not have to apply link override settings. If not, | |
1922 | * initialize user PHY configuration with link override values | |
ea78ce4d | 1923 | */ |
0a02944f AV |
1924 | if (!ice_fw_supports_report_dflt_cfg(pi->hw) && |
1925 | (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) { | |
ea78ce4d PG |
1926 | ice_init_phy_cfg_dflt_override(pi); |
1927 | goto out; | |
1928 | } | |
1929 | } | |
1930 | ||
0a02944f AV |
1931 | /* if link default override is not enabled, set user flow control and |
1932 | * FEC settings based on what get_phy_caps returned | |
ea78ce4d | 1933 | */ |
1a3571b5 PG |
1934 | phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, |
1935 | pcaps->link_fec_options); | |
1936 | phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); | |
1937 | ||
ea78ce4d | 1938 | out: |
1a3571b5 | 1939 | phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; |
7e408e07 | 1940 | set_bit(ICE_PHY_INIT_COMPLETE, pf->state); |
1a3571b5 PG |
1941 | err_out: |
1942 | kfree(pcaps); | |
1943 | return err; | |
1944 | } | |
1945 | ||
1946 | /** | |
1947 | * ice_configure_phy - configure PHY | |
1948 | * @vsi: VSI of PHY | |
1949 | * | |
1950 | * Set the PHY configuration. If the current PHY configuration is the same as | |
1951 | * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise | |
1952 | * configure the based get PHY capabilities for topology with media. | |
1953 | */ | |
1954 | static int ice_configure_phy(struct ice_vsi *vsi) | |
1955 | { | |
1956 | struct device *dev = ice_pf_to_dev(vsi->back); | |
efc1eddb | 1957 | struct ice_port_info *pi = vsi->port_info; |
1a3571b5 PG |
1958 | struct ice_aqc_get_phy_caps_data *pcaps; |
1959 | struct ice_aqc_set_phy_cfg_data *cfg; | |
efc1eddb AV |
1960 | struct ice_phy_info *phy = &pi->phy; |
1961 | struct ice_pf *pf = vsi->back; | |
1a3571b5 PG |
1962 | enum ice_status status; |
1963 | int err = 0; | |
1964 | ||
1a3571b5 | 1965 | /* Ensure we have media as we cannot configure a medialess port */ |
efc1eddb | 1966 | if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) |
1a3571b5 PG |
1967 | return -EPERM; |
1968 | ||
1969 | ice_print_topo_conflict(vsi); | |
1970 | ||
4fc5fbee AV |
1971 | if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && |
1972 | phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) | |
1a3571b5 PG |
1973 | return -EPERM; |
1974 | ||
efc1eddb | 1975 | if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) |
1a3571b5 PG |
1976 | return ice_force_phys_link_state(vsi, true); |
1977 | ||
1978 | pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); | |
1979 | if (!pcaps) | |
1980 | return -ENOMEM; | |
1981 | ||
1982 | /* Get current PHY config */ | |
d6730a87 | 1983 | status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, |
1a3571b5 PG |
1984 | NULL); |
1985 | if (status) { | |
1986 | dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", | |
1987 | vsi->vsi_num, ice_stat_str(status)); | |
1988 | err = -EIO; | |
1989 | goto done; | |
1990 | } | |
1991 | ||
1992 | /* If PHY enable link is configured and configuration has not changed, | |
1993 | * there's nothing to do | |
1994 | */ | |
1995 | if (pcaps->caps & ICE_AQC_PHY_EN_LINK && | |
efc1eddb | 1996 | ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg)) |
1a3571b5 PG |
1997 | goto done; |
1998 | ||
1999 | /* Use PHY topology as baseline for configuration */ | |
2000 | memset(pcaps, 0, sizeof(*pcaps)); | |
0a02944f AV |
2001 | if (ice_fw_supports_report_dflt_cfg(pi->hw)) |
2002 | status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG, | |
2003 | pcaps, NULL); | |
2004 | else | |
2005 | status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, | |
2006 | pcaps, NULL); | |
1a3571b5 | 2007 | if (status) { |
0a02944f | 2008 | dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n", |
1a3571b5 PG |
2009 | vsi->vsi_num, ice_stat_str(status)); |
2010 | err = -EIO; | |
2011 | goto done; | |
2012 | } | |
2013 | ||
2014 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | |
2015 | if (!cfg) { | |
2016 | err = -ENOMEM; | |
2017 | goto done; | |
2018 | } | |
2019 | ||
ea78ce4d | 2020 | ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); |
1a3571b5 PG |
2021 | |
2022 | /* Speed - If default override pending, use curr_user_phy_cfg set in | |
2023 | * ice_init_phy_user_cfg_ldo. | |
2024 | */ | |
7e408e07 | 2025 | if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, |
ea78ce4d | 2026 | vsi->back->state)) { |
efc1eddb AV |
2027 | cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low; |
2028 | cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high; | |
ea78ce4d PG |
2029 | } else { |
2030 | u64 phy_low = 0, phy_high = 0; | |
2031 | ||
2032 | ice_update_phy_type(&phy_low, &phy_high, | |
2033 | pi->phy.curr_user_speed_req); | |
2034 | cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); | |
2035 | cfg->phy_type_high = pcaps->phy_type_high & | |
2036 | cpu_to_le64(phy_high); | |
2037 | } | |
1a3571b5 PG |
2038 | |
2039 | /* Can't provide what was requested; use PHY capabilities */ | |
2040 | if (!cfg->phy_type_low && !cfg->phy_type_high) { | |
2041 | cfg->phy_type_low = pcaps->phy_type_low; | |
2042 | cfg->phy_type_high = pcaps->phy_type_high; | |
2043 | } | |
2044 | ||
2045 | /* FEC */ | |
efc1eddb | 2046 | ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req); |
1a3571b5 PG |
2047 | |
2048 | /* Can't provide what was requested; use PHY capabilities */ | |
2049 | if (cfg->link_fec_opt != | |
2050 | (cfg->link_fec_opt & pcaps->link_fec_options)) { | |
2051 | cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; | |
2052 | cfg->link_fec_opt = pcaps->link_fec_options; | |
2053 | } | |
2054 | ||
2055 | /* Flow Control - always supported; no need to check against | |
2056 | * capabilities | |
2057 | */ | |
efc1eddb | 2058 | ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req); |
1a3571b5 PG |
2059 | |
2060 | /* Enable link and link update */ | |
2061 | cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; | |
2062 | ||
efc1eddb | 2063 | status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL); |
1a3571b5 PG |
2064 | if (status) { |
2065 | dev_err(dev, "Failed to set phy config, VSI %d error %s\n", | |
2066 | vsi->vsi_num, ice_stat_str(status)); | |
2067 | err = -EIO; | |
2068 | } | |
2069 | ||
2070 | kfree(cfg); | |
2071 | done: | |
2072 | kfree(pcaps); | |
2073 | return err; | |
2074 | } | |
2075 | ||
2076 | /** | |
2077 | * ice_check_media_subtask - Check for media | |
6d599946 | 2078 | * @pf: pointer to PF struct |
1a3571b5 PG |
2079 | * |
2080 | * If media is available, then initialize PHY user configuration if it is not | |
2081 | * been, and configure the PHY if the interface is up. | |
6d599946 TN |
2082 | */ |
2083 | static void ice_check_media_subtask(struct ice_pf *pf) | |
2084 | { | |
2085 | struct ice_port_info *pi; | |
2086 | struct ice_vsi *vsi; | |
2087 | int err; | |
2088 | ||
1a3571b5 PG |
2089 | /* No need to check for media if it's already present */ |
2090 | if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) | |
6d599946 TN |
2091 | return; |
2092 | ||
1a3571b5 PG |
2093 | vsi = ice_get_main_vsi(pf); |
2094 | if (!vsi) | |
6d599946 TN |
2095 | return; |
2096 | ||
2097 | /* Refresh link info and check if media is present */ | |
2098 | pi = vsi->port_info; | |
2099 | err = ice_update_link_info(pi); | |
2100 | if (err) | |
2101 | return; | |
2102 | ||
c77849f5 AV |
2103 | ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); |
2104 | ||
6d599946 | 2105 | if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { |
7e408e07 | 2106 | if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) |
1a3571b5 PG |
2107 | ice_init_phy_user_cfg(pi); |
2108 | ||
2109 | /* PHY settings are reset on media insertion, reconfigure | |
2110 | * PHY to preserve settings. | |
2111 | */ | |
e97fb1ae | 2112 | if (test_bit(ICE_VSI_DOWN, vsi->state) && |
1a3571b5 | 2113 | test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) |
6d599946 | 2114 | return; |
1a3571b5 PG |
2115 | |
2116 | err = ice_configure_phy(vsi); | |
2117 | if (!err) | |
2118 | clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); | |
6d599946 TN |
2119 | |
2120 | /* A Link Status Event will be generated; the event handler | |
2121 | * will complete bringing the interface up | |
2122 | */ | |
2123 | } | |
2124 | } | |
2125 | ||
940b61af AV |
2126 | /** |
2127 | * ice_service_task - manage and run subtasks | |
2128 | * @work: pointer to work_struct contained by the PF struct | |
2129 | */ | |
2130 | static void ice_service_task(struct work_struct *work) | |
2131 | { | |
2132 | struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); | |
2133 | unsigned long start_time = jiffies; | |
2134 | ||
2135 | /* subtasks */ | |
0b28b702 AV |
2136 | |
2137 | /* process reset requests first */ | |
2138 | ice_reset_subtask(pf); | |
2139 | ||
0f9d5027 | 2140 | /* bail if a reset/recovery cycle is pending or rebuild failed */ |
5df7e45d | 2141 | if (ice_is_reset_in_progress(pf->state) || |
7e408e07 AV |
2142 | test_bit(ICE_SUSPENDED, pf->state) || |
2143 | test_bit(ICE_NEEDS_RESTART, pf->state)) { | |
0b28b702 AV |
2144 | ice_service_task_complete(pf); |
2145 | return; | |
2146 | } | |
2147 | ||
462acf6a | 2148 | ice_clean_adminq_subtask(pf); |
6d599946 | 2149 | ice_check_media_subtask(pf); |
b3969fd7 | 2150 | ice_check_for_hang_subtask(pf); |
e94d4478 | 2151 | ice_sync_fltr_subtask(pf); |
b3969fd7 | 2152 | ice_handle_mdd_event(pf); |
fcea6f3d | 2153 | ice_watchdog_subtask(pf); |
462acf6a TN |
2154 | |
2155 | if (ice_is_safe_mode(pf)) { | |
2156 | ice_service_task_complete(pf); | |
2157 | return; | |
2158 | } | |
2159 | ||
2160 | ice_process_vflr_event(pf); | |
75d2b253 | 2161 | ice_clean_mailboxq_subtask(pf); |
8f5ee3c4 | 2162 | ice_clean_sbq_subtask(pf); |
28bf2672 | 2163 | ice_sync_arfs_fltrs(pf); |
d6218317 | 2164 | ice_flush_fdir_ctx(pf); |
7e408e07 AV |
2165 | |
2166 | /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */ | |
940b61af AV |
2167 | ice_service_task_complete(pf); |
2168 | ||
2169 | /* If the tasks have taken longer than one service timer period | |
2170 | * or there is more work to be done, reset the service timer to | |
2171 | * schedule the service task now. | |
2172 | */ | |
2173 | if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || | |
7e408e07 AV |
2174 | test_bit(ICE_MDD_EVENT_PENDING, pf->state) || |
2175 | test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || | |
2176 | test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || | |
2177 | test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || | |
8f5ee3c4 | 2178 | test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || |
7e408e07 | 2179 | test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) |
940b61af AV |
2180 | mod_timer(&pf->serv_tmr, jiffies); |
2181 | } | |
2182 | ||
f31e4b6f AV |
2183 | /** |
2184 | * ice_set_ctrlq_len - helper function to set controlq length | |
f9867df6 | 2185 | * @hw: pointer to the HW instance |
f31e4b6f AV |
2186 | */ |
2187 | static void ice_set_ctrlq_len(struct ice_hw *hw) | |
2188 | { | |
2189 | hw->adminq.num_rq_entries = ICE_AQ_LEN; | |
2190 | hw->adminq.num_sq_entries = ICE_AQ_LEN; | |
2191 | hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; | |
2192 | hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; | |
c8a1071d | 2193 | hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M; |
11836214 | 2194 | hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; |
75d2b253 AV |
2195 | hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; |
2196 | hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; | |
8f5ee3c4 JK |
2197 | hw->sbq.num_rq_entries = ICE_SBQ_LEN; |
2198 | hw->sbq.num_sq_entries = ICE_SBQ_LEN; | |
2199 | hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; | |
2200 | hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; | |
f31e4b6f AV |
2201 | } |
2202 | ||
87324e74 HT |
2203 | /** |
2204 | * ice_schedule_reset - schedule a reset | |
2205 | * @pf: board private structure | |
2206 | * @reset: reset being requested | |
2207 | */ | |
2208 | int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) | |
2209 | { | |
2210 | struct device *dev = ice_pf_to_dev(pf); | |
2211 | ||
2212 | /* bail out if earlier reset has failed */ | |
7e408e07 | 2213 | if (test_bit(ICE_RESET_FAILED, pf->state)) { |
87324e74 HT |
2214 | dev_dbg(dev, "earlier reset has failed\n"); |
2215 | return -EIO; | |
2216 | } | |
2217 | /* bail if reset/recovery already in progress */ | |
2218 | if (ice_is_reset_in_progress(pf->state)) { | |
2219 | dev_dbg(dev, "Reset already in progress\n"); | |
2220 | return -EBUSY; | |
2221 | } | |
2222 | ||
f9f5301e DE |
2223 | ice_unplug_aux_dev(pf); |
2224 | ||
87324e74 HT |
2225 | switch (reset) { |
2226 | case ICE_RESET_PFR: | |
7e408e07 | 2227 | set_bit(ICE_PFR_REQ, pf->state); |
87324e74 HT |
2228 | break; |
2229 | case ICE_RESET_CORER: | |
7e408e07 | 2230 | set_bit(ICE_CORER_REQ, pf->state); |
87324e74 HT |
2231 | break; |
2232 | case ICE_RESET_GLOBR: | |
7e408e07 | 2233 | set_bit(ICE_GLOBR_REQ, pf->state); |
87324e74 HT |
2234 | break; |
2235 | default: | |
2236 | return -EINVAL; | |
2237 | } | |
2238 | ||
2239 | ice_service_task_schedule(pf); | |
2240 | return 0; | |
2241 | } | |
2242 | ||
cdedef59 AV |
2243 | /** |
2244 | * ice_irq_affinity_notify - Callback for affinity changes | |
2245 | * @notify: context as to what irq was changed | |
2246 | * @mask: the new affinity mask | |
2247 | * | |
2248 | * This is a callback function used by the irq_set_affinity_notifier function | |
2249 | * so that we may register to receive changes to the irq affinity masks. | |
2250 | */ | |
c8b7abdd BA |
2251 | static void |
2252 | ice_irq_affinity_notify(struct irq_affinity_notify *notify, | |
2253 | const cpumask_t *mask) | |
cdedef59 AV |
2254 | { |
2255 | struct ice_q_vector *q_vector = | |
2256 | container_of(notify, struct ice_q_vector, affinity_notify); | |
2257 | ||
2258 | cpumask_copy(&q_vector->affinity_mask, mask); | |
2259 | } | |
2260 | ||
2261 | /** | |
2262 | * ice_irq_affinity_release - Callback for affinity notifier release | |
2263 | * @ref: internal core kernel usage | |
2264 | * | |
2265 | * This is a callback function used by the irq_set_affinity_notifier function | |
2266 | * to inform the current notification subscriber that they will no longer | |
2267 | * receive notifications. | |
2268 | */ | |
2269 | static void ice_irq_affinity_release(struct kref __always_unused *ref) {} | |
2270 | ||
cdedef59 AV |
2271 | /** |
2272 | * ice_vsi_ena_irq - Enable IRQ for the given VSI | |
2273 | * @vsi: the VSI being configured | |
2274 | */ | |
2275 | static int ice_vsi_ena_irq(struct ice_vsi *vsi) | |
2276 | { | |
ba880734 BC |
2277 | struct ice_hw *hw = &vsi->back->hw; |
2278 | int i; | |
cdedef59 | 2279 | |
ba880734 BC |
2280 | ice_for_each_q_vector(vsi, i) |
2281 | ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]); | |
cdedef59 AV |
2282 | |
2283 | ice_flush(hw); | |
2284 | return 0; | |
2285 | } | |
2286 | ||
cdedef59 AV |
2287 | /** |
2288 | * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI | |
2289 | * @vsi: the VSI being configured | |
2290 | * @basename: name for the vector | |
2291 | */ | |
2292 | static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) | |
2293 | { | |
2294 | int q_vectors = vsi->num_q_vectors; | |
2295 | struct ice_pf *pf = vsi->back; | |
cbe66bfe | 2296 | int base = vsi->base_vector; |
4015d11e | 2297 | struct device *dev; |
cdedef59 AV |
2298 | int rx_int_idx = 0; |
2299 | int tx_int_idx = 0; | |
2300 | int vector, err; | |
2301 | int irq_num; | |
2302 | ||
4015d11e | 2303 | dev = ice_pf_to_dev(pf); |
cdedef59 AV |
2304 | for (vector = 0; vector < q_vectors; vector++) { |
2305 | struct ice_q_vector *q_vector = vsi->q_vectors[vector]; | |
2306 | ||
2307 | irq_num = pf->msix_entries[base + vector].vector; | |
2308 | ||
2309 | if (q_vector->tx.ring && q_vector->rx.ring) { | |
2310 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | |
2311 | "%s-%s-%d", basename, "TxRx", rx_int_idx++); | |
2312 | tx_int_idx++; | |
2313 | } else if (q_vector->rx.ring) { | |
2314 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | |
2315 | "%s-%s-%d", basename, "rx", rx_int_idx++); | |
2316 | } else if (q_vector->tx.ring) { | |
2317 | snprintf(q_vector->name, sizeof(q_vector->name) - 1, | |
2318 | "%s-%s-%d", basename, "tx", tx_int_idx++); | |
2319 | } else { | |
2320 | /* skip this unused q_vector */ | |
2321 | continue; | |
2322 | } | |
da62c5ff QZ |
2323 | if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) |
2324 | err = devm_request_irq(dev, irq_num, vsi->irq_handler, | |
2325 | IRQF_SHARED, q_vector->name, | |
2326 | q_vector); | |
2327 | else | |
2328 | err = devm_request_irq(dev, irq_num, vsi->irq_handler, | |
2329 | 0, q_vector->name, q_vector); | |
cdedef59 | 2330 | if (err) { |
19cce2c6 AV |
2331 | netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n", |
2332 | err); | |
cdedef59 AV |
2333 | goto free_q_irqs; |
2334 | } | |
2335 | ||
2336 | /* register for affinity change notifications */ | |
28bf2672 BC |
2337 | if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { |
2338 | struct irq_affinity_notify *affinity_notify; | |
2339 | ||
2340 | affinity_notify = &q_vector->affinity_notify; | |
2341 | affinity_notify->notify = ice_irq_affinity_notify; | |
2342 | affinity_notify->release = ice_irq_affinity_release; | |
2343 | irq_set_affinity_notifier(irq_num, affinity_notify); | |
2344 | } | |
cdedef59 AV |
2345 | |
2346 | /* assign the mask for this irq */ | |
2347 | irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); | |
2348 | } | |
2349 | ||
2350 | vsi->irqs_ready = true; | |
2351 | return 0; | |
2352 | ||
2353 | free_q_irqs: | |
2354 | while (vector) { | |
2355 | vector--; | |
28bf2672 BC |
2356 | irq_num = pf->msix_entries[base + vector].vector; |
2357 | if (!IS_ENABLED(CONFIG_RFS_ACCEL)) | |
2358 | irq_set_affinity_notifier(irq_num, NULL); | |
cdedef59 | 2359 | irq_set_affinity_hint(irq_num, NULL); |
4015d11e | 2360 | devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); |
cdedef59 AV |
2361 | } |
2362 | return err; | |
2363 | } | |
2364 | ||
efc2214b MF |
2365 | /** |
2366 | * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP | |
2367 | * @vsi: VSI to setup Tx rings used by XDP | |
2368 | * | |
2369 | * Return 0 on success and negative value on error | |
2370 | */ | |
2371 | static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) | |
2372 | { | |
9a946843 | 2373 | struct device *dev = ice_pf_to_dev(vsi->back); |
efc2214b MF |
2374 | int i; |
2375 | ||
2376 | for (i = 0; i < vsi->num_xdp_txq; i++) { | |
2377 | u16 xdp_q_idx = vsi->alloc_txq + i; | |
2378 | struct ice_ring *xdp_ring; | |
2379 | ||
2380 | xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL); | |
2381 | ||
2382 | if (!xdp_ring) | |
2383 | goto free_xdp_rings; | |
2384 | ||
2385 | xdp_ring->q_index = xdp_q_idx; | |
2386 | xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx]; | |
2387 | xdp_ring->ring_active = false; | |
2388 | xdp_ring->vsi = vsi; | |
2389 | xdp_ring->netdev = NULL; | |
2390 | xdp_ring->dev = dev; | |
2391 | xdp_ring->count = vsi->num_tx_desc; | |
b1d95cc2 | 2392 | WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); |
efc2214b MF |
2393 | if (ice_setup_tx_ring(xdp_ring)) |
2394 | goto free_xdp_rings; | |
2395 | ice_set_ring_xdp(xdp_ring); | |
1742b3d5 | 2396 | xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring); |
efc2214b MF |
2397 | } |
2398 | ||
2399 | return 0; | |
2400 | ||
2401 | free_xdp_rings: | |
2402 | for (; i >= 0; i--) | |
2403 | if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) | |
2404 | ice_free_tx_ring(vsi->xdp_rings[i]); | |
2405 | return -ENOMEM; | |
2406 | } | |
2407 | ||
2408 | /** | |
2409 | * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI | |
2410 | * @vsi: VSI to set the bpf prog on | |
2411 | * @prog: the bpf prog pointer | |
2412 | */ | |
2413 | static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) | |
2414 | { | |
2415 | struct bpf_prog *old_prog; | |
2416 | int i; | |
2417 | ||
2418 | old_prog = xchg(&vsi->xdp_prog, prog); | |
2419 | if (old_prog) | |
2420 | bpf_prog_put(old_prog); | |
2421 | ||
2422 | ice_for_each_rxq(vsi, i) | |
2423 | WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); | |
2424 | } | |
2425 | ||
2426 | /** | |
2427 | * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP | |
2428 | * @vsi: VSI to bring up Tx rings used by XDP | |
2429 | * @prog: bpf program that will be assigned to VSI | |
2430 | * | |
2431 | * Return 0 on success and negative value on error | |
2432 | */ | |
2433 | int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) | |
2434 | { | |
2435 | u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; | |
2436 | int xdp_rings_rem = vsi->num_xdp_txq; | |
2437 | struct ice_pf *pf = vsi->back; | |
2438 | struct ice_qs_cfg xdp_qs_cfg = { | |
2439 | .qs_mutex = &pf->avail_q_mutex, | |
2440 | .pf_map = pf->avail_txqs, | |
2441 | .pf_map_size = pf->max_pf_txqs, | |
2442 | .q_count = vsi->num_xdp_txq, | |
2443 | .scatter_count = ICE_MAX_SCATTER_TXQS, | |
2444 | .vsi_map = vsi->txq_map, | |
2445 | .vsi_map_offset = vsi->alloc_txq, | |
2446 | .mapping_mode = ICE_VSI_MAP_CONTIG | |
2447 | }; | |
2448 | enum ice_status status; | |
4015d11e | 2449 | struct device *dev; |
efc2214b MF |
2450 | int i, v_idx; |
2451 | ||
4015d11e BC |
2452 | dev = ice_pf_to_dev(pf); |
2453 | vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, | |
efc2214b MF |
2454 | sizeof(*vsi->xdp_rings), GFP_KERNEL); |
2455 | if (!vsi->xdp_rings) | |
2456 | return -ENOMEM; | |
2457 | ||
2458 | vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode; | |
2459 | if (__ice_vsi_get_qs(&xdp_qs_cfg)) | |
2460 | goto err_map_xdp; | |
2461 | ||
2462 | if (ice_xdp_alloc_setup_rings(vsi)) | |
2463 | goto clear_xdp_rings; | |
2464 | ||
2465 | /* follow the logic from ice_vsi_map_rings_to_vectors */ | |
2466 | ice_for_each_q_vector(vsi, v_idx) { | |
2467 | struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; | |
2468 | int xdp_rings_per_v, q_id, q_base; | |
2469 | ||
2470 | xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, | |
2471 | vsi->num_q_vectors - v_idx); | |
2472 | q_base = vsi->num_xdp_txq - xdp_rings_rem; | |
2473 | ||
2474 | for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { | |
2475 | struct ice_ring *xdp_ring = vsi->xdp_rings[q_id]; | |
2476 | ||
2477 | xdp_ring->q_vector = q_vector; | |
2478 | xdp_ring->next = q_vector->tx.ring; | |
2479 | q_vector->tx.ring = xdp_ring; | |
2480 | } | |
2481 | xdp_rings_rem -= xdp_rings_per_v; | |
2482 | } | |
2483 | ||
2484 | /* omit the scheduler update if in reset path; XDP queues will be | |
2485 | * taken into account at the end of ice_vsi_rebuild, where | |
2486 | * ice_cfg_vsi_lan is being called | |
2487 | */ | |
2488 | if (ice_is_reset_in_progress(pf->state)) | |
2489 | return 0; | |
2490 | ||
2491 | /* tell the Tx scheduler that right now we have | |
2492 | * additional queues | |
2493 | */ | |
2494 | for (i = 0; i < vsi->tc_cfg.numtc; i++) | |
2495 | max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq; | |
2496 | ||
2497 | status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, | |
2498 | max_txqs); | |
2499 | if (status) { | |
0fee3577 LY |
2500 | dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n", |
2501 | ice_stat_str(status)); | |
efc2214b MF |
2502 | goto clear_xdp_rings; |
2503 | } | |
2504 | ice_vsi_assign_bpf_prog(vsi, prog); | |
2505 | ||
2506 | return 0; | |
2507 | clear_xdp_rings: | |
2508 | for (i = 0; i < vsi->num_xdp_txq; i++) | |
2509 | if (vsi->xdp_rings[i]) { | |
2510 | kfree_rcu(vsi->xdp_rings[i], rcu); | |
2511 | vsi->xdp_rings[i] = NULL; | |
2512 | } | |
2513 | ||
2514 | err_map_xdp: | |
2515 | mutex_lock(&pf->avail_q_mutex); | |
2516 | for (i = 0; i < vsi->num_xdp_txq; i++) { | |
2517 | clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); | |
2518 | vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; | |
2519 | } | |
2520 | mutex_unlock(&pf->avail_q_mutex); | |
2521 | ||
4015d11e | 2522 | devm_kfree(dev, vsi->xdp_rings); |
efc2214b MF |
2523 | return -ENOMEM; |
2524 | } | |
2525 | ||
2526 | /** | |
2527 | * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings | |
2528 | * @vsi: VSI to remove XDP rings | |
2529 | * | |
2530 | * Detach XDP rings from irq vectors, clean up the PF bitmap and free | |
2531 | * resources | |
2532 | */ | |
2533 | int ice_destroy_xdp_rings(struct ice_vsi *vsi) | |
2534 | { | |
2535 | u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; | |
2536 | struct ice_pf *pf = vsi->back; | |
2537 | int i, v_idx; | |
2538 | ||
2539 | /* q_vectors are freed in reset path so there's no point in detaching | |
ac382a09 | 2540 | * rings; in case of rebuild being triggered not from reset bits |
efc2214b MF |
2541 | * in pf->state won't be set, so additionally check first q_vector |
2542 | * against NULL | |
2543 | */ | |
2544 | if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) | |
2545 | goto free_qmap; | |
2546 | ||
2547 | ice_for_each_q_vector(vsi, v_idx) { | |
2548 | struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; | |
2549 | struct ice_ring *ring; | |
2550 | ||
2551 | ice_for_each_ring(ring, q_vector->tx) | |
2552 | if (!ring->tx_buf || !ice_ring_is_xdp(ring)) | |
2553 | break; | |
2554 | ||
2555 | /* restore the value of last node prior to XDP setup */ | |
2556 | q_vector->tx.ring = ring; | |
2557 | } | |
2558 | ||
2559 | free_qmap: | |
2560 | mutex_lock(&pf->avail_q_mutex); | |
2561 | for (i = 0; i < vsi->num_xdp_txq; i++) { | |
2562 | clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs); | |
2563 | vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX; | |
2564 | } | |
2565 | mutex_unlock(&pf->avail_q_mutex); | |
2566 | ||
2567 | for (i = 0; i < vsi->num_xdp_txq; i++) | |
2568 | if (vsi->xdp_rings[i]) { | |
2569 | if (vsi->xdp_rings[i]->desc) | |
2570 | ice_free_tx_ring(vsi->xdp_rings[i]); | |
2571 | kfree_rcu(vsi->xdp_rings[i], rcu); | |
2572 | vsi->xdp_rings[i] = NULL; | |
2573 | } | |
2574 | ||
4015d11e | 2575 | devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); |
efc2214b MF |
2576 | vsi->xdp_rings = NULL; |
2577 | ||
2578 | if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) | |
2579 | return 0; | |
2580 | ||
2581 | ice_vsi_assign_bpf_prog(vsi, NULL); | |
2582 | ||
2583 | /* notify Tx scheduler that we destroyed XDP queues and bring | |
2584 | * back the old number of child nodes | |
2585 | */ | |
2586 | for (i = 0; i < vsi->tc_cfg.numtc; i++) | |
2587 | max_txqs[i] = vsi->num_txq; | |
2588 | ||
c8f135c6 MP |
2589 | /* change number of XDP Tx queues to 0 */ |
2590 | vsi->num_xdp_txq = 0; | |
2591 | ||
efc2214b MF |
2592 | return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, |
2593 | max_txqs); | |
2594 | } | |
2595 | ||
c7a21904 MS |
2596 | /** |
2597 | * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI | |
2598 | * @vsi: VSI to schedule napi on | |
2599 | */ | |
2600 | static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) | |
2601 | { | |
2602 | int i; | |
2603 | ||
2604 | ice_for_each_rxq(vsi, i) { | |
2605 | struct ice_ring *rx_ring = vsi->rx_rings[i]; | |
2606 | ||
2607 | if (rx_ring->xsk_pool) | |
2608 | napi_schedule(&rx_ring->q_vector->napi); | |
2609 | } | |
2610 | } | |
2611 | ||
efc2214b MF |
2612 | /** |
2613 | * ice_xdp_setup_prog - Add or remove XDP eBPF program | |
2614 | * @vsi: VSI to setup XDP for | |
2615 | * @prog: XDP program | |
2616 | * @extack: netlink extended ack | |
2617 | */ | |
2618 | static int | |
2619 | ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, | |
2620 | struct netlink_ext_ack *extack) | |
2621 | { | |
2622 | int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; | |
2623 | bool if_running = netif_running(vsi->netdev); | |
2624 | int ret = 0, xdp_ring_err = 0; | |
2625 | ||
2626 | if (frame_size > vsi->rx_buf_len) { | |
2627 | NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP"); | |
2628 | return -EOPNOTSUPP; | |
2629 | } | |
2630 | ||
2631 | /* need to stop netdev while setting up the program for Rx rings */ | |
e97fb1ae | 2632 | if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { |
efc2214b MF |
2633 | ret = ice_down(vsi); |
2634 | if (ret) { | |
af23635a | 2635 | NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); |
efc2214b MF |
2636 | return ret; |
2637 | } | |
2638 | } | |
2639 | ||
2640 | if (!ice_is_xdp_ena_vsi(vsi) && prog) { | |
ae15e0ba | 2641 | vsi->num_xdp_txq = vsi->alloc_rxq; |
efc2214b MF |
2642 | xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); |
2643 | if (xdp_ring_err) | |
af23635a | 2644 | NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); |
efc2214b MF |
2645 | } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { |
2646 | xdp_ring_err = ice_destroy_xdp_rings(vsi); | |
2647 | if (xdp_ring_err) | |
af23635a | 2648 | NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); |
efc2214b MF |
2649 | } else { |
2650 | ice_vsi_assign_bpf_prog(vsi, prog); | |
2651 | } | |
2652 | ||
2653 | if (if_running) | |
2654 | ret = ice_up(vsi); | |
2655 | ||
c7a21904 MS |
2656 | if (!ret && prog) |
2657 | ice_vsi_rx_napi_schedule(vsi); | |
2d4238f5 | 2658 | |
efc2214b MF |
2659 | return (ret || xdp_ring_err) ? -ENOMEM : 0; |
2660 | } | |
2661 | ||
ebc5399e MF |
2662 | /** |
2663 | * ice_xdp_safe_mode - XDP handler for safe mode | |
2664 | * @dev: netdevice | |
2665 | * @xdp: XDP command | |
2666 | */ | |
2667 | static int ice_xdp_safe_mode(struct net_device __always_unused *dev, | |
2668 | struct netdev_bpf *xdp) | |
2669 | { | |
2670 | NL_SET_ERR_MSG_MOD(xdp->extack, | |
2671 | "Please provide working DDP firmware package in order to use XDP\n" | |
2672 | "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); | |
2673 | return -EOPNOTSUPP; | |
2674 | } | |
2675 | ||
efc2214b MF |
2676 | /** |
2677 | * ice_xdp - implements XDP handler | |
2678 | * @dev: netdevice | |
2679 | * @xdp: XDP command | |
2680 | */ | |
2681 | static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |
2682 | { | |
2683 | struct ice_netdev_priv *np = netdev_priv(dev); | |
2684 | struct ice_vsi *vsi = np->vsi; | |
2685 | ||
2686 | if (vsi->type != ICE_VSI_PF) { | |
af23635a | 2687 | NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); |
efc2214b MF |
2688 | return -EINVAL; |
2689 | } | |
2690 | ||
2691 | switch (xdp->command) { | |
2692 | case XDP_SETUP_PROG: | |
2693 | return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); | |
1742b3d5 MK |
2694 | case XDP_SETUP_XSK_POOL: |
2695 | return ice_xsk_pool_setup(vsi, xdp->xsk.pool, | |
2d4238f5 | 2696 | xdp->xsk.queue_id); |
efc2214b MF |
2697 | default: |
2698 | return -EINVAL; | |
2699 | } | |
2700 | } | |
2701 | ||
940b61af AV |
2702 | /** |
2703 | * ice_ena_misc_vector - enable the non-queue interrupts | |
2704 | * @pf: board private structure | |
2705 | */ | |
2706 | static void ice_ena_misc_vector(struct ice_pf *pf) | |
2707 | { | |
2708 | struct ice_hw *hw = &pf->hw; | |
2709 | u32 val; | |
2710 | ||
9d5c5a52 PG |
2711 | /* Disable anti-spoof detection interrupt to prevent spurious event |
2712 | * interrupts during a function reset. Anti-spoof functionally is | |
2713 | * still supported. | |
2714 | */ | |
2715 | val = rd32(hw, GL_MDCK_TX_TDPU); | |
2716 | val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M; | |
2717 | wr32(hw, GL_MDCK_TX_TDPU, val); | |
2718 | ||
940b61af AV |
2719 | /* clear things first */ |
2720 | wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ | |
2721 | rd32(hw, PFINT_OICR); /* read to clear */ | |
2722 | ||
3bcd7fa3 | 2723 | val = (PFINT_OICR_ECC_ERR_M | |
940b61af AV |
2724 | PFINT_OICR_MAL_DETECT_M | |
2725 | PFINT_OICR_GRST_M | | |
2726 | PFINT_OICR_PCI_EXCEPTION_M | | |
007676b4 | 2727 | PFINT_OICR_VFLR_M | |
3bcd7fa3 | 2728 | PFINT_OICR_HMC_ERR_M | |
348048e7 | 2729 | PFINT_OICR_PE_PUSH_M | |
3bcd7fa3 | 2730 | PFINT_OICR_PE_CRITERR_M); |
940b61af AV |
2731 | |
2732 | wr32(hw, PFINT_OICR_ENA, val); | |
2733 | ||
2734 | /* SW_ITR_IDX = 0, but don't change INTENA */ | |
cbe66bfe | 2735 | wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), |
940b61af AV |
2736 | GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); |
2737 | } | |
2738 | ||
2739 | /** | |
2740 | * ice_misc_intr - misc interrupt handler | |
2741 | * @irq: interrupt number | |
2742 | * @data: pointer to a q_vector | |
2743 | */ | |
2744 | static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) | |
2745 | { | |
2746 | struct ice_pf *pf = (struct ice_pf *)data; | |
2747 | struct ice_hw *hw = &pf->hw; | |
2748 | irqreturn_t ret = IRQ_NONE; | |
4015d11e | 2749 | struct device *dev; |
940b61af AV |
2750 | u32 oicr, ena_mask; |
2751 | ||
4015d11e | 2752 | dev = ice_pf_to_dev(pf); |
7e408e07 AV |
2753 | set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); |
2754 | set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); | |
8f5ee3c4 | 2755 | set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); |
940b61af AV |
2756 | |
2757 | oicr = rd32(hw, PFINT_OICR); | |
2758 | ena_mask = rd32(hw, PFINT_OICR_ENA); | |
2759 | ||
0e674aeb AV |
2760 | if (oicr & PFINT_OICR_SWINT_M) { |
2761 | ena_mask &= ~PFINT_OICR_SWINT_M; | |
2762 | pf->sw_int_count++; | |
2763 | } | |
2764 | ||
b3969fd7 SM |
2765 | if (oicr & PFINT_OICR_MAL_DETECT_M) { |
2766 | ena_mask &= ~PFINT_OICR_MAL_DETECT_M; | |
7e408e07 | 2767 | set_bit(ICE_MDD_EVENT_PENDING, pf->state); |
b3969fd7 | 2768 | } |
007676b4 | 2769 | if (oicr & PFINT_OICR_VFLR_M) { |
f844d521 | 2770 | /* disable any further VFLR event notifications */ |
7e408e07 | 2771 | if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { |
f844d521 BC |
2772 | u32 reg = rd32(hw, PFINT_OICR_ENA); |
2773 | ||
2774 | reg &= ~PFINT_OICR_VFLR_M; | |
2775 | wr32(hw, PFINT_OICR_ENA, reg); | |
2776 | } else { | |
2777 | ena_mask &= ~PFINT_OICR_VFLR_M; | |
7e408e07 | 2778 | set_bit(ICE_VFLR_EVENT_PENDING, pf->state); |
f844d521 | 2779 | } |
007676b4 | 2780 | } |
b3969fd7 | 2781 | |
0b28b702 AV |
2782 | if (oicr & PFINT_OICR_GRST_M) { |
2783 | u32 reset; | |
b3969fd7 | 2784 | |
0b28b702 AV |
2785 | /* we have a reset warning */ |
2786 | ena_mask &= ~PFINT_OICR_GRST_M; | |
2787 | reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> | |
2788 | GLGEN_RSTAT_RESET_TYPE_S; | |
2789 | ||
2790 | if (reset == ICE_RESET_CORER) | |
2791 | pf->corer_count++; | |
2792 | else if (reset == ICE_RESET_GLOBR) | |
2793 | pf->globr_count++; | |
ca4929b6 | 2794 | else if (reset == ICE_RESET_EMPR) |
0b28b702 | 2795 | pf->empr_count++; |
ca4929b6 | 2796 | else |
4015d11e | 2797 | dev_dbg(dev, "Invalid reset type %d\n", reset); |
0b28b702 AV |
2798 | |
2799 | /* If a reset cycle isn't already in progress, we set a bit in | |
2800 | * pf->state so that the service task can start a reset/rebuild. | |
0b28b702 | 2801 | */ |
7e408e07 | 2802 | if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) { |
0b28b702 | 2803 | if (reset == ICE_RESET_CORER) |
7e408e07 | 2804 | set_bit(ICE_CORER_RECV, pf->state); |
0b28b702 | 2805 | else if (reset == ICE_RESET_GLOBR) |
7e408e07 | 2806 | set_bit(ICE_GLOBR_RECV, pf->state); |
0b28b702 | 2807 | else |
7e408e07 | 2808 | set_bit(ICE_EMPR_RECV, pf->state); |
0b28b702 | 2809 | |
fd2a9817 AV |
2810 | /* There are couple of different bits at play here. |
2811 | * hw->reset_ongoing indicates whether the hardware is | |
2812 | * in reset. This is set to true when a reset interrupt | |
2813 | * is received and set back to false after the driver | |
2814 | * has determined that the hardware is out of reset. | |
2815 | * | |
7e408e07 | 2816 | * ICE_RESET_OICR_RECV in pf->state indicates |
fd2a9817 AV |
2817 | * that a post reset rebuild is required before the |
2818 | * driver is operational again. This is set above. | |
2819 | * | |
2820 | * As this is the start of the reset/rebuild cycle, set | |
2821 | * both to indicate that. | |
2822 | */ | |
2823 | hw->reset_ongoing = true; | |
0b28b702 AV |
2824 | } |
2825 | } | |
2826 | ||
ea9b847c JK |
2827 | if (oicr & PFINT_OICR_TSYN_TX_M) { |
2828 | ena_mask &= ~PFINT_OICR_TSYN_TX_M; | |
2829 | ice_ptp_process_ts(pf); | |
2830 | } | |
2831 | ||
172db5f9 MM |
2832 | if (oicr & PFINT_OICR_TSYN_EVNT_M) { |
2833 | u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; | |
2834 | u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); | |
2835 | ||
2836 | /* Save EVENTs from GTSYN register */ | |
2837 | pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | | |
2838 | GLTSYN_STAT_EVENT1_M | | |
2839 | GLTSYN_STAT_EVENT2_M); | |
2840 | ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; | |
2841 | kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); | |
2842 | } | |
2843 | ||
348048e7 DE |
2844 | #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) |
2845 | if (oicr & ICE_AUX_CRIT_ERR) { | |
2846 | struct iidc_event *event; | |
2847 | ||
2848 | ena_mask &= ~ICE_AUX_CRIT_ERR; | |
2849 | event = kzalloc(sizeof(*event), GFP_KERNEL); | |
2850 | if (event) { | |
2851 | set_bit(IIDC_EVENT_CRIT_ERR, event->type); | |
2852 | /* report the entire OICR value to AUX driver */ | |
2853 | event->reg = oicr; | |
2854 | ice_send_event_to_aux(pf, event); | |
2855 | kfree(event); | |
2856 | } | |
940b61af AV |
2857 | } |
2858 | ||
8d7189d2 | 2859 | /* Report any remaining unexpected interrupts */ |
940b61af AV |
2860 | oicr &= ena_mask; |
2861 | if (oicr) { | |
4015d11e | 2862 | dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); |
940b61af AV |
2863 | /* If a critical error is pending there is no choice but to |
2864 | * reset the device. | |
2865 | */ | |
348048e7 | 2866 | if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | |
0b28b702 | 2867 | PFINT_OICR_ECC_ERR_M)) { |
7e408e07 | 2868 | set_bit(ICE_PFR_REQ, pf->state); |
0b28b702 AV |
2869 | ice_service_task_schedule(pf); |
2870 | } | |
940b61af AV |
2871 | } |
2872 | ret = IRQ_HANDLED; | |
2873 | ||
de75135b AV |
2874 | ice_service_task_schedule(pf); |
2875 | ice_irq_dynamic_ena(hw, NULL, NULL); | |
940b61af AV |
2876 | |
2877 | return ret; | |
2878 | } | |
2879 | ||
0e04e8e1 BC |
2880 | /** |
2881 | * ice_dis_ctrlq_interrupts - disable control queue interrupts | |
2882 | * @hw: pointer to HW structure | |
2883 | */ | |
2884 | static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) | |
2885 | { | |
2886 | /* disable Admin queue Interrupt causes */ | |
2887 | wr32(hw, PFINT_FW_CTL, | |
2888 | rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); | |
2889 | ||
2890 | /* disable Mailbox queue Interrupt causes */ | |
2891 | wr32(hw, PFINT_MBX_CTL, | |
2892 | rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); | |
2893 | ||
8f5ee3c4 JK |
2894 | wr32(hw, PFINT_SB_CTL, |
2895 | rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); | |
2896 | ||
0e04e8e1 BC |
2897 | /* disable Control queue Interrupt causes */ |
2898 | wr32(hw, PFINT_OICR_CTL, | |
2899 | rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); | |
2900 | ||
2901 | ice_flush(hw); | |
2902 | } | |
2903 | ||
940b61af AV |
2904 | /** |
2905 | * ice_free_irq_msix_misc - Unroll misc vector setup | |
2906 | * @pf: board private structure | |
2907 | */ | |
2908 | static void ice_free_irq_msix_misc(struct ice_pf *pf) | |
2909 | { | |
0e04e8e1 BC |
2910 | struct ice_hw *hw = &pf->hw; |
2911 | ||
2912 | ice_dis_ctrlq_interrupts(hw); | |
2913 | ||
940b61af | 2914 | /* disable OICR interrupt */ |
0e04e8e1 BC |
2915 | wr32(hw, PFINT_OICR_ENA, 0); |
2916 | ice_flush(hw); | |
940b61af | 2917 | |
ba880734 | 2918 | if (pf->msix_entries) { |
cbe66bfe | 2919 | synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); |
4015d11e | 2920 | devm_free_irq(ice_pf_to_dev(pf), |
cbe66bfe | 2921 | pf->msix_entries[pf->oicr_idx].vector, pf); |
940b61af AV |
2922 | } |
2923 | ||
eb0208ec | 2924 | pf->num_avail_sw_msix += 1; |
cbe66bfe | 2925 | ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); |
940b61af AV |
2926 | } |
2927 | ||
0e04e8e1 BC |
2928 | /** |
2929 | * ice_ena_ctrlq_interrupts - enable control queue interrupts | |
2930 | * @hw: pointer to HW structure | |
b07833a0 | 2931 | * @reg_idx: HW vector index to associate the control queue interrupts with |
0e04e8e1 | 2932 | */ |
b07833a0 | 2933 | static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) |
0e04e8e1 BC |
2934 | { |
2935 | u32 val; | |
2936 | ||
b07833a0 | 2937 | val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) | |
0e04e8e1 BC |
2938 | PFINT_OICR_CTL_CAUSE_ENA_M); |
2939 | wr32(hw, PFINT_OICR_CTL, val); | |
2940 | ||
2941 | /* enable Admin queue Interrupt causes */ | |
b07833a0 | 2942 | val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) | |
0e04e8e1 BC |
2943 | PFINT_FW_CTL_CAUSE_ENA_M); |
2944 | wr32(hw, PFINT_FW_CTL, val); | |
2945 | ||
2946 | /* enable Mailbox queue Interrupt causes */ | |
b07833a0 | 2947 | val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) | |
0e04e8e1 BC |
2948 | PFINT_MBX_CTL_CAUSE_ENA_M); |
2949 | wr32(hw, PFINT_MBX_CTL, val); | |
2950 | ||
8f5ee3c4 JK |
2951 | /* This enables Sideband queue Interrupt causes */ |
2952 | val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | | |
2953 | PFINT_SB_CTL_CAUSE_ENA_M); | |
2954 | wr32(hw, PFINT_SB_CTL, val); | |
2955 | ||
0e04e8e1 BC |
2956 | ice_flush(hw); |
2957 | } | |
2958 | ||
940b61af AV |
2959 | /** |
2960 | * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events | |
2961 | * @pf: board private structure | |
2962 | * | |
2963 | * This sets up the handler for MSIX 0, which is used to manage the | |
df17b7e0 | 2964 | * non-queue interrupts, e.g. AdminQ and errors. This is not used |
940b61af AV |
2965 | * when in MSI or Legacy interrupt mode. |
2966 | */ | |
2967 | static int ice_req_irq_msix_misc(struct ice_pf *pf) | |
2968 | { | |
4015d11e | 2969 | struct device *dev = ice_pf_to_dev(pf); |
940b61af AV |
2970 | struct ice_hw *hw = &pf->hw; |
2971 | int oicr_idx, err = 0; | |
940b61af AV |
2972 | |
2973 | if (!pf->int_name[0]) | |
2974 | snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", | |
4015d11e | 2975 | dev_driver_string(dev), dev_name(dev)); |
940b61af | 2976 | |
0b28b702 AV |
2977 | /* Do not request IRQ but do enable OICR interrupt since settings are |
2978 | * lost during reset. Note that this function is called only during | |
2979 | * rebuild path and not while reset is in progress. | |
2980 | */ | |
5df7e45d | 2981 | if (ice_is_reset_in_progress(pf->state)) |
0b28b702 AV |
2982 | goto skip_req_irq; |
2983 | ||
cbe66bfe BC |
2984 | /* reserve one vector in irq_tracker for misc interrupts */ |
2985 | oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); | |
940b61af AV |
2986 | if (oicr_idx < 0) |
2987 | return oicr_idx; | |
2988 | ||
eb0208ec | 2989 | pf->num_avail_sw_msix -= 1; |
88865fc4 | 2990 | pf->oicr_idx = (u16)oicr_idx; |
940b61af | 2991 | |
4015d11e | 2992 | err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, |
940b61af AV |
2993 | ice_misc_intr, 0, pf->int_name, pf); |
2994 | if (err) { | |
4015d11e | 2995 | dev_err(dev, "devm_request_irq for %s failed: %d\n", |
940b61af | 2996 | pf->int_name, err); |
cbe66bfe | 2997 | ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); |
eb0208ec | 2998 | pf->num_avail_sw_msix += 1; |
940b61af AV |
2999 | return err; |
3000 | } | |
3001 | ||
0b28b702 | 3002 | skip_req_irq: |
940b61af AV |
3003 | ice_ena_misc_vector(pf); |
3004 | ||
cbe66bfe BC |
3005 | ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); |
3006 | wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), | |
63f545ed | 3007 | ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); |
940b61af AV |
3008 | |
3009 | ice_flush(hw); | |
cdedef59 | 3010 | ice_irq_dynamic_ena(hw, NULL, NULL); |
940b61af AV |
3011 | |
3012 | return 0; | |
3013 | } | |
3014 | ||
3a858ba3 | 3015 | /** |
df0f8479 AV |
3016 | * ice_napi_add - register NAPI handler for the VSI |
3017 | * @vsi: VSI for which NAPI handler is to be registered | |
3a858ba3 | 3018 | * |
df0f8479 AV |
3019 | * This function is only called in the driver's load path. Registering the NAPI |
3020 | * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, | |
3021 | * reset/rebuild, etc.) | |
3a858ba3 | 3022 | */ |
df0f8479 | 3023 | static void ice_napi_add(struct ice_vsi *vsi) |
3a858ba3 | 3024 | { |
df0f8479 | 3025 | int v_idx; |
3a858ba3 | 3026 | |
df0f8479 | 3027 | if (!vsi->netdev) |
3a858ba3 | 3028 | return; |
3a858ba3 | 3029 | |
0c2561c8 | 3030 | ice_for_each_q_vector(vsi, v_idx) |
df0f8479 AV |
3031 | netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, |
3032 | ice_napi_poll, NAPI_POLL_WEIGHT); | |
3a858ba3 AV |
3033 | } |
3034 | ||
3035 | /** | |
462acf6a TN |
3036 | * ice_set_ops - set netdev and ethtools ops for the given netdev |
3037 | * @netdev: netdev instance | |
3a858ba3 | 3038 | */ |
462acf6a | 3039 | static void ice_set_ops(struct net_device *netdev) |
3a858ba3 | 3040 | { |
462acf6a TN |
3041 | struct ice_pf *pf = ice_netdev_to_pf(netdev); |
3042 | ||
3043 | if (ice_is_safe_mode(pf)) { | |
3044 | netdev->netdev_ops = &ice_netdev_safe_mode_ops; | |
3045 | ice_set_ethtool_safe_mode_ops(netdev); | |
3046 | return; | |
3047 | } | |
3048 | ||
3049 | netdev->netdev_ops = &ice_netdev_ops; | |
b20e6c17 | 3050 | netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; |
462acf6a TN |
3051 | ice_set_ethtool_ops(netdev); |
3052 | } | |
3053 | ||
3054 | /** | |
3055 | * ice_set_netdev_features - set features for the given netdev | |
3056 | * @netdev: netdev instance | |
3057 | */ | |
3058 | static void ice_set_netdev_features(struct net_device *netdev) | |
3059 | { | |
3060 | struct ice_pf *pf = ice_netdev_to_pf(netdev); | |
d76a60ba AV |
3061 | netdev_features_t csumo_features; |
3062 | netdev_features_t vlano_features; | |
3063 | netdev_features_t dflt_features; | |
3064 | netdev_features_t tso_features; | |
3a858ba3 | 3065 | |
462acf6a TN |
3066 | if (ice_is_safe_mode(pf)) { |
3067 | /* safe mode */ | |
3068 | netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; | |
3069 | netdev->hw_features = netdev->features; | |
3070 | return; | |
3071 | } | |
3a858ba3 | 3072 | |
d76a60ba AV |
3073 | dflt_features = NETIF_F_SG | |
3074 | NETIF_F_HIGHDMA | | |
148beb61 | 3075 | NETIF_F_NTUPLE | |
d76a60ba AV |
3076 | NETIF_F_RXHASH; |
3077 | ||
3078 | csumo_features = NETIF_F_RXCSUM | | |
3079 | NETIF_F_IP_CSUM | | |
cf909e19 | 3080 | NETIF_F_SCTP_CRC | |
d76a60ba AV |
3081 | NETIF_F_IPV6_CSUM; |
3082 | ||
3083 | vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | | |
3084 | NETIF_F_HW_VLAN_CTAG_TX | | |
3085 | NETIF_F_HW_VLAN_CTAG_RX; | |
3086 | ||
a4e82a81 TN |
3087 | tso_features = NETIF_F_TSO | |
3088 | NETIF_F_TSO_ECN | | |
3089 | NETIF_F_TSO6 | | |
3090 | NETIF_F_GSO_GRE | | |
3091 | NETIF_F_GSO_UDP_TUNNEL | | |
3092 | NETIF_F_GSO_GRE_CSUM | | |
3093 | NETIF_F_GSO_UDP_TUNNEL_CSUM | | |
3094 | NETIF_F_GSO_PARTIAL | | |
3095 | NETIF_F_GSO_IPXIP4 | | |
3096 | NETIF_F_GSO_IPXIP6 | | |
a54e3b8c | 3097 | NETIF_F_GSO_UDP_L4; |
d76a60ba | 3098 | |
a4e82a81 TN |
3099 | netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | |
3100 | NETIF_F_GSO_GRE_CSUM; | |
3a858ba3 | 3101 | /* set features that user can change */ |
d76a60ba AV |
3102 | netdev->hw_features = dflt_features | csumo_features | |
3103 | vlano_features | tso_features; | |
3a858ba3 | 3104 | |
a4e82a81 TN |
3105 | /* add support for HW_CSUM on packets with MPLS header */ |
3106 | netdev->mpls_features = NETIF_F_HW_CSUM; | |
3107 | ||
3a858ba3 AV |
3108 | /* enable features */ |
3109 | netdev->features |= netdev->hw_features; | |
d76a60ba AV |
3110 | /* encap and VLAN devices inherit default, csumo and tso features */ |
3111 | netdev->hw_enc_features |= dflt_features | csumo_features | | |
3112 | tso_features; | |
3113 | netdev->vlan_features |= dflt_features | csumo_features | | |
3114 | tso_features; | |
462acf6a TN |
3115 | } |
3116 | ||
3117 | /** | |
3118 | * ice_cfg_netdev - Allocate, configure and register a netdev | |
3119 | * @vsi: the VSI associated with the new netdev | |
3120 | * | |
3121 | * Returns 0 on success, negative value on failure | |
3122 | */ | |
3123 | static int ice_cfg_netdev(struct ice_vsi *vsi) | |
3124 | { | |
462acf6a TN |
3125 | struct ice_netdev_priv *np; |
3126 | struct net_device *netdev; | |
3127 | u8 mac_addr[ETH_ALEN]; | |
1adf7ead | 3128 | |
462acf6a TN |
3129 | netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, |
3130 | vsi->alloc_rxq); | |
1e23f076 AV |
3131 | if (!netdev) |
3132 | return -ENOMEM; | |
462acf6a | 3133 | |
a476d72a | 3134 | set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); |
462acf6a TN |
3135 | vsi->netdev = netdev; |
3136 | np = netdev_priv(netdev); | |
3137 | np->vsi = vsi; | |
3138 | ||
3139 | ice_set_netdev_features(netdev); | |
3140 | ||
3141 | ice_set_ops(netdev); | |
3a858ba3 AV |
3142 | |
3143 | if (vsi->type == ICE_VSI_PF) { | |
c73bf3bd | 3144 | SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); |
3a858ba3 | 3145 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); |
f3956ebb | 3146 | eth_hw_addr_set(netdev, mac_addr); |
3a858ba3 AV |
3147 | ether_addr_copy(netdev->perm_addr, mac_addr); |
3148 | } | |
3149 | ||
3150 | netdev->priv_flags |= IFF_UNICAST_FLT; | |
3151 | ||
462acf6a TN |
3152 | /* Setup netdev TC information */ |
3153 | ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); | |
cdedef59 | 3154 | |
3a858ba3 AV |
3155 | /* setup watchdog timeout value to be 5 second */ |
3156 | netdev->watchdog_timeo = 5 * HZ; | |
3157 | ||
3158 | netdev->min_mtu = ETH_MIN_MTU; | |
3159 | netdev->max_mtu = ICE_MAX_MTU; | |
3160 | ||
3a858ba3 AV |
3161 | return 0; |
3162 | } | |
3163 | ||
d76a60ba AV |
3164 | /** |
3165 | * ice_fill_rss_lut - Fill the RSS lookup table with default values | |
3166 | * @lut: Lookup table | |
3167 | * @rss_table_size: Lookup table size | |
3168 | * @rss_size: Range of queue number for hashing | |
3169 | */ | |
3170 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) | |
3171 | { | |
3172 | u16 i; | |
3173 | ||
3174 | for (i = 0; i < rss_table_size; i++) | |
3175 | lut[i] = i % rss_size; | |
3176 | } | |
3177 | ||
0f9d5027 AV |
3178 | /** |
3179 | * ice_pf_vsi_setup - Set up a PF VSI | |
3180 | * @pf: board private structure | |
3181 | * @pi: pointer to the port_info instance | |
3182 | * | |
0e674aeb AV |
3183 | * Returns pointer to the successfully allocated VSI software struct |
3184 | * on success, otherwise returns NULL on failure. | |
0f9d5027 AV |
3185 | */ |
3186 | static struct ice_vsi * | |
3187 | ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) | |
3188 | { | |
3189 | return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); | |
3190 | } | |
3191 | ||
148beb61 HT |
3192 | /** |
3193 | * ice_ctrl_vsi_setup - Set up a control VSI | |
3194 | * @pf: board private structure | |
3195 | * @pi: pointer to the port_info instance | |
3196 | * | |
3197 | * Returns pointer to the successfully allocated VSI software struct | |
3198 | * on success, otherwise returns NULL on failure. | |
3199 | */ | |
3200 | static struct ice_vsi * | |
3201 | ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) | |
3202 | { | |
3203 | return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID); | |
3204 | } | |
3205 | ||
0e674aeb AV |
3206 | /** |
3207 | * ice_lb_vsi_setup - Set up a loopback VSI | |
3208 | * @pf: board private structure | |
3209 | * @pi: pointer to the port_info instance | |
3210 | * | |
3211 | * Returns pointer to the successfully allocated VSI software struct | |
3212 | * on success, otherwise returns NULL on failure. | |
3213 | */ | |
3214 | struct ice_vsi * | |
3215 | ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) | |
3216 | { | |
3217 | return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID); | |
3218 | } | |
3219 | ||
d76a60ba | 3220 | /** |
f9867df6 | 3221 | * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload |
d76a60ba AV |
3222 | * @netdev: network interface to be adjusted |
3223 | * @proto: unused protocol | |
f9867df6 | 3224 | * @vid: VLAN ID to be added |
d76a60ba | 3225 | * |
f9867df6 | 3226 | * net_device_ops implementation for adding VLAN IDs |
d76a60ba | 3227 | */ |
c8b7abdd BA |
3228 | static int |
3229 | ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, | |
3230 | u16 vid) | |
d76a60ba AV |
3231 | { |
3232 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
3233 | struct ice_vsi *vsi = np->vsi; | |
5eda8afd | 3234 | int ret; |
d76a60ba | 3235 | |
42f3efef BC |
3236 | /* VLAN 0 is added by default during load/reset */ |
3237 | if (!vid) | |
3238 | return 0; | |
3239 | ||
3240 | /* Enable VLAN pruning when a VLAN other than 0 is added */ | |
3241 | if (!ice_vsi_is_vlan_pruning_ena(vsi)) { | |
5eda8afd | 3242 | ret = ice_cfg_vlan_pruning(vsi, true, false); |
4f74dcc1 BC |
3243 | if (ret) |
3244 | return ret; | |
3245 | } | |
3246 | ||
42f3efef BC |
3247 | /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged |
3248 | * packets aren't pruned by the device's internal switch on Rx | |
d76a60ba | 3249 | */ |
1b8f15b6 | 3250 | ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); |
bcf68ea1 | 3251 | if (!ret) |
e97fb1ae | 3252 | set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); |
5eda8afd AA |
3253 | |
3254 | return ret; | |
d76a60ba AV |
3255 | } |
3256 | ||
d76a60ba | 3257 | /** |
f9867df6 | 3258 | * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload |
d76a60ba AV |
3259 | * @netdev: network interface to be adjusted |
3260 | * @proto: unused protocol | |
f9867df6 | 3261 | * @vid: VLAN ID to be removed |
d76a60ba | 3262 | * |
f9867df6 | 3263 | * net_device_ops implementation for removing VLAN IDs |
d76a60ba | 3264 | */ |
c8b7abdd BA |
3265 | static int |
3266 | ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, | |
3267 | u16 vid) | |
d76a60ba AV |
3268 | { |
3269 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
3270 | struct ice_vsi *vsi = np->vsi; | |
5eda8afd | 3271 | int ret; |
d76a60ba | 3272 | |
42f3efef BC |
3273 | /* don't allow removal of VLAN 0 */ |
3274 | if (!vid) | |
3275 | return 0; | |
3276 | ||
4f74dcc1 BC |
3277 | /* Make sure ice_vsi_kill_vlan is successful before updating VLAN |
3278 | * information | |
d76a60ba | 3279 | */ |
5eda8afd AA |
3280 | ret = ice_vsi_kill_vlan(vsi, vid); |
3281 | if (ret) | |
3282 | return ret; | |
d76a60ba | 3283 | |
42f3efef BC |
3284 | /* Disable pruning when VLAN 0 is the only VLAN rule */ |
3285 | if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) | |
5eda8afd | 3286 | ret = ice_cfg_vlan_pruning(vsi, false, false); |
4f74dcc1 | 3287 | |
e97fb1ae | 3288 | set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state); |
5eda8afd | 3289 | return ret; |
d76a60ba AV |
3290 | } |
3291 | ||
3a858ba3 AV |
3292 | /** |
3293 | * ice_setup_pf_sw - Setup the HW switch on startup or after reset | |
3294 | * @pf: board private structure | |
3295 | * | |
3296 | * Returns 0 on success, negative value on failure | |
3297 | */ | |
3298 | static int ice_setup_pf_sw(struct ice_pf *pf) | |
3299 | { | |
3300 | struct ice_vsi *vsi; | |
3301 | int status = 0; | |
3302 | ||
5df7e45d | 3303 | if (ice_is_reset_in_progress(pf->state)) |
0f9d5027 AV |
3304 | return -EBUSY; |
3305 | ||
3306 | vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); | |
135f4b9e JK |
3307 | if (!vsi) |
3308 | return -ENOMEM; | |
3a858ba3 | 3309 | |
df0f8479 AV |
3310 | status = ice_cfg_netdev(vsi); |
3311 | if (status) { | |
3312 | status = -ENODEV; | |
3313 | goto unroll_vsi_setup; | |
3314 | } | |
efc2214b MF |
3315 | /* netdev has to be configured before setting frame size */ |
3316 | ice_vsi_cfg_frame_size(vsi); | |
df0f8479 | 3317 | |
b94b013e DE |
3318 | /* Setup DCB netlink interface */ |
3319 | ice_dcbnl_setup(vsi); | |
3320 | ||
df0f8479 AV |
3321 | /* registering the NAPI handler requires both the queues and |
3322 | * netdev to be created, which are done in ice_pf_vsi_setup() | |
3323 | * and ice_cfg_netdev() respectively | |
3324 | */ | |
3325 | ice_napi_add(vsi); | |
3326 | ||
28bf2672 BC |
3327 | status = ice_set_cpu_rx_rmap(vsi); |
3328 | if (status) { | |
3329 | dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n", | |
3330 | vsi->vsi_num, status); | |
3331 | status = -EINVAL; | |
3332 | goto unroll_napi_add; | |
3333 | } | |
561f4379 | 3334 | status = ice_init_mac_fltr(pf); |
9daf8208 | 3335 | if (status) |
28bf2672 | 3336 | goto free_cpu_rx_map; |
9daf8208 | 3337 | |
9daf8208 AV |
3338 | return status; |
3339 | ||
28bf2672 BC |
3340 | free_cpu_rx_map: |
3341 | ice_free_cpu_rx_rmap(vsi); | |
3342 | ||
df0f8479 | 3343 | unroll_napi_add: |
3a858ba3 | 3344 | if (vsi) { |
df0f8479 | 3345 | ice_napi_del(vsi); |
3a858ba3 | 3346 | if (vsi->netdev) { |
a476d72a | 3347 | clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); |
3a858ba3 AV |
3348 | free_netdev(vsi->netdev); |
3349 | vsi->netdev = NULL; | |
3350 | } | |
df0f8479 | 3351 | } |
9daf8208 | 3352 | |
df0f8479 | 3353 | unroll_vsi_setup: |
135f4b9e | 3354 | ice_vsi_release(vsi); |
3a858ba3 AV |
3355 | return status; |
3356 | } | |
3357 | ||
940b61af | 3358 | /** |
8c243700 AV |
3359 | * ice_get_avail_q_count - Get count of queues in use |
3360 | * @pf_qmap: bitmap to get queue use count from | |
3361 | * @lock: pointer to a mutex that protects access to pf_qmap | |
3362 | * @size: size of the bitmap | |
940b61af | 3363 | */ |
8c243700 AV |
3364 | static u16 |
3365 | ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size) | |
940b61af | 3366 | { |
88865fc4 KK |
3367 | unsigned long bit; |
3368 | u16 count = 0; | |
940b61af | 3369 | |
8c243700 AV |
3370 | mutex_lock(lock); |
3371 | for_each_clear_bit(bit, pf_qmap, size) | |
3372 | count++; | |
3373 | mutex_unlock(lock); | |
940b61af | 3374 | |
8c243700 AV |
3375 | return count; |
3376 | } | |
d76a60ba | 3377 | |
8c243700 AV |
3378 | /** |
3379 | * ice_get_avail_txq_count - Get count of Tx queues in use | |
3380 | * @pf: pointer to an ice_pf instance | |
3381 | */ | |
3382 | u16 ice_get_avail_txq_count(struct ice_pf *pf) | |
3383 | { | |
3384 | return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, | |
3385 | pf->max_pf_txqs); | |
3386 | } | |
940b61af | 3387 | |
8c243700 AV |
3388 | /** |
3389 | * ice_get_avail_rxq_count - Get count of Rx queues in use | |
3390 | * @pf: pointer to an ice_pf instance | |
3391 | */ | |
3392 | u16 ice_get_avail_rxq_count(struct ice_pf *pf) | |
3393 | { | |
3394 | return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, | |
3395 | pf->max_pf_rxqs); | |
940b61af AV |
3396 | } |
3397 | ||
3398 | /** | |
3399 | * ice_deinit_pf - Unrolls initialziations done by ice_init_pf | |
3400 | * @pf: board private structure to initialize | |
3401 | */ | |
3402 | static void ice_deinit_pf(struct ice_pf *pf) | |
3403 | { | |
8d81fa55 | 3404 | ice_service_task_stop(pf); |
940b61af | 3405 | mutex_destroy(&pf->sw_mutex); |
b94b013e | 3406 | mutex_destroy(&pf->tc_mutex); |
940b61af | 3407 | mutex_destroy(&pf->avail_q_mutex); |
78b5713a AV |
3408 | |
3409 | if (pf->avail_txqs) { | |
3410 | bitmap_free(pf->avail_txqs); | |
3411 | pf->avail_txqs = NULL; | |
3412 | } | |
3413 | ||
3414 | if (pf->avail_rxqs) { | |
3415 | bitmap_free(pf->avail_rxqs); | |
3416 | pf->avail_rxqs = NULL; | |
3417 | } | |
06c16d89 JK |
3418 | |
3419 | if (pf->ptp.clock) | |
3420 | ptp_clock_unregister(pf->ptp.clock); | |
940b61af AV |
3421 | } |
3422 | ||
3423 | /** | |
462acf6a TN |
3424 | * ice_set_pf_caps - set PFs capability flags |
3425 | * @pf: pointer to the PF instance | |
940b61af | 3426 | */ |
462acf6a | 3427 | static void ice_set_pf_caps(struct ice_pf *pf) |
940b61af | 3428 | { |
462acf6a TN |
3429 | struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; |
3430 | ||
d25a0fc4 DE |
3431 | clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
3432 | clear_bit(ICE_FLAG_AUX_ENA, pf->flags); | |
3433 | if (func_caps->common_cap.rdma) { | |
3434 | set_bit(ICE_FLAG_RDMA_ENA, pf->flags); | |
3435 | set_bit(ICE_FLAG_AUX_ENA, pf->flags); | |
3436 | } | |
462acf6a TN |
3437 | clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
3438 | if (func_caps->common_cap.dcb) | |
80739b57 | 3439 | set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); |
462acf6a TN |
3440 | clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
3441 | if (func_caps->common_cap.sr_iov_1_1) { | |
75d2b253 | 3442 | set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); |
462acf6a | 3443 | pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs, |
75d2b253 AV |
3444 | ICE_MAX_VF_COUNT); |
3445 | } | |
462acf6a TN |
3446 | clear_bit(ICE_FLAG_RSS_ENA, pf->flags); |
3447 | if (func_caps->common_cap.rss_table_size) | |
3448 | set_bit(ICE_FLAG_RSS_ENA, pf->flags); | |
940b61af | 3449 | |
148beb61 HT |
3450 | clear_bit(ICE_FLAG_FD_ENA, pf->flags); |
3451 | if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { | |
3452 | u16 unused; | |
3453 | ||
3454 | /* ctrl_vsi_idx will be set to a valid value when flow director | |
3455 | * is setup by ice_init_fdir | |
3456 | */ | |
3457 | pf->ctrl_vsi_idx = ICE_NO_VSI; | |
3458 | set_bit(ICE_FLAG_FD_ENA, pf->flags); | |
3459 | /* force guaranteed filter pool for PF */ | |
3460 | ice_alloc_fd_guar_item(&pf->hw, &unused, | |
3461 | func_caps->fd_fltr_guar); | |
3462 | /* force shared filter pool for PF */ | |
3463 | ice_alloc_fd_shrd_item(&pf->hw, &unused, | |
3464 | func_caps->fd_fltr_best_effort); | |
3465 | } | |
3466 | ||
06c16d89 JK |
3467 | clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); |
3468 | if (func_caps->common_cap.ieee_1588) | |
3469 | set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); | |
3470 | ||
462acf6a TN |
3471 | pf->max_pf_txqs = func_caps->common_cap.num_txq; |
3472 | pf->max_pf_rxqs = func_caps->common_cap.num_rxq; | |
3473 | } | |
940b61af | 3474 | |
462acf6a TN |
3475 | /** |
3476 | * ice_init_pf - Initialize general software structures (struct ice_pf) | |
3477 | * @pf: board private structure to initialize | |
3478 | */ | |
3479 | static int ice_init_pf(struct ice_pf *pf) | |
3480 | { | |
3481 | ice_set_pf_caps(pf); | |
3482 | ||
3483 | mutex_init(&pf->sw_mutex); | |
b94b013e | 3484 | mutex_init(&pf->tc_mutex); |
d76a60ba | 3485 | |
d69ea414 JK |
3486 | INIT_HLIST_HEAD(&pf->aq_wait_list); |
3487 | spin_lock_init(&pf->aq_wait_lock); | |
3488 | init_waitqueue_head(&pf->aq_wait_queue); | |
3489 | ||
1c08052e JK |
3490 | init_waitqueue_head(&pf->reset_wait_queue); |
3491 | ||
940b61af AV |
3492 | /* setup service timer and periodic service task */ |
3493 | timer_setup(&pf->serv_tmr, ice_service_timer, 0); | |
3494 | pf->serv_tmr_period = HZ; | |
3495 | INIT_WORK(&pf->serv_task, ice_service_task); | |
7e408e07 | 3496 | clear_bit(ICE_SERVICE_SCHED, pf->state); |
78b5713a | 3497 | |
462acf6a | 3498 | mutex_init(&pf->avail_q_mutex); |
78b5713a AV |
3499 | pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); |
3500 | if (!pf->avail_txqs) | |
3501 | return -ENOMEM; | |
3502 | ||
3503 | pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); | |
3504 | if (!pf->avail_rxqs) { | |
4015d11e | 3505 | devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); |
78b5713a AV |
3506 | pf->avail_txqs = NULL; |
3507 | return -ENOMEM; | |
3508 | } | |
3509 | ||
3510 | return 0; | |
940b61af AV |
3511 | } |
3512 | ||
3513 | /** | |
3514 | * ice_ena_msix_range - Request a range of MSIX vectors from the OS | |
3515 | * @pf: board private structure | |
3516 | * | |
3517 | * compute the number of MSIX vectors required (v_budget) and request from | |
3518 | * the OS. Return the number of vectors reserved or negative on failure | |
3519 | */ | |
3520 | static int ice_ena_msix_range(struct ice_pf *pf) | |
3521 | { | |
d25a0fc4 | 3522 | int num_cpus, v_left, v_actual, v_other, v_budget = 0; |
4015d11e | 3523 | struct device *dev = ice_pf_to_dev(pf); |
940b61af AV |
3524 | int needed, err, i; |
3525 | ||
3526 | v_left = pf->hw.func_caps.common_cap.num_msix_vectors; | |
d25a0fc4 | 3527 | num_cpus = num_online_cpus(); |
940b61af | 3528 | |
741106f7 TN |
3529 | /* reserve for LAN miscellaneous handler */ |
3530 | needed = ICE_MIN_LAN_OICR_MSIX; | |
152b978a AV |
3531 | if (v_left < needed) |
3532 | goto no_hw_vecs_left_err; | |
940b61af AV |
3533 | v_budget += needed; |
3534 | v_left -= needed; | |
3535 | ||
741106f7 | 3536 | /* reserve for flow director */ |
148beb61 HT |
3537 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
3538 | needed = ICE_FDIR_MSIX; | |
3539 | if (v_left < needed) | |
3540 | goto no_hw_vecs_left_err; | |
3541 | v_budget += needed; | |
3542 | v_left -= needed; | |
3543 | } | |
3544 | ||
741106f7 TN |
3545 | /* total used for non-traffic vectors */ |
3546 | v_other = v_budget; | |
3547 | ||
3548 | /* reserve vectors for LAN traffic */ | |
d25a0fc4 | 3549 | needed = num_cpus; |
741106f7 TN |
3550 | if (v_left < needed) |
3551 | goto no_hw_vecs_left_err; | |
3552 | pf->num_lan_msix = needed; | |
3553 | v_budget += needed; | |
3554 | v_left -= needed; | |
3555 | ||
d25a0fc4 DE |
3556 | /* reserve vectors for RDMA auxiliary driver */ |
3557 | if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { | |
3558 | needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; | |
3559 | if (v_left < needed) | |
3560 | goto no_hw_vecs_left_err; | |
3561 | pf->num_rdma_msix = needed; | |
3562 | v_budget += needed; | |
3563 | v_left -= needed; | |
3564 | } | |
3565 | ||
4015d11e | 3566 | pf->msix_entries = devm_kcalloc(dev, v_budget, |
c6dfd690 | 3567 | sizeof(*pf->msix_entries), GFP_KERNEL); |
940b61af AV |
3568 | if (!pf->msix_entries) { |
3569 | err = -ENOMEM; | |
3570 | goto exit_err; | |
3571 | } | |
3572 | ||
3573 | for (i = 0; i < v_budget; i++) | |
3574 | pf->msix_entries[i].entry = i; | |
3575 | ||
3576 | /* actually reserve the vectors */ | |
3577 | v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, | |
3578 | ICE_MIN_MSIX, v_budget); | |
940b61af | 3579 | if (v_actual < 0) { |
4015d11e | 3580 | dev_err(dev, "unable to reserve MSI-X vectors\n"); |
940b61af AV |
3581 | err = v_actual; |
3582 | goto msix_err; | |
3583 | } | |
3584 | ||
3585 | if (v_actual < v_budget) { | |
19cce2c6 | 3586 | dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", |
940b61af | 3587 | v_budget, v_actual); |
152b978a | 3588 | |
f3fe97f6 | 3589 | if (v_actual < ICE_MIN_MSIX) { |
152b978a | 3590 | /* error if we can't get minimum vectors */ |
940b61af AV |
3591 | pci_disable_msix(pf->pdev); |
3592 | err = -ERANGE; | |
3593 | goto msix_err; | |
152b978a | 3594 | } else { |
d25a0fc4 DE |
3595 | int v_remain = v_actual - v_other; |
3596 | int v_rdma = 0, v_min_rdma = 0; | |
3597 | ||
3598 | if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) { | |
3599 | /* Need at least 1 interrupt in addition to | |
3600 | * AEQ MSIX | |
3601 | */ | |
3602 | v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; | |
3603 | v_min_rdma = ICE_MIN_RDMA_MSIX; | |
3604 | } | |
741106f7 TN |
3605 | |
3606 | if (v_actual == ICE_MIN_MSIX || | |
d25a0fc4 DE |
3607 | v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { |
3608 | dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); | |
3609 | clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); | |
3610 | ||
3611 | pf->num_rdma_msix = 0; | |
741106f7 | 3612 | pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; |
d25a0fc4 DE |
3613 | } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || |
3614 | (v_remain - v_rdma < v_rdma)) { | |
3615 | /* Support minimum RDMA and give remaining | |
3616 | * vectors to LAN MSIX | |
3617 | */ | |
3618 | pf->num_rdma_msix = v_min_rdma; | |
3619 | pf->num_lan_msix = v_remain - v_min_rdma; | |
3620 | } else { | |
3621 | /* Split remaining MSIX with RDMA after | |
3622 | * accounting for AEQ MSIX | |
3623 | */ | |
3624 | pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + | |
3625 | ICE_RDMA_NUM_AEQ_MSIX; | |
3626 | pf->num_lan_msix = v_remain - pf->num_rdma_msix; | |
3627 | } | |
741106f7 TN |
3628 | |
3629 | dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", | |
3630 | pf->num_lan_msix); | |
d25a0fc4 DE |
3631 | |
3632 | if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) | |
3633 | dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", | |
3634 | pf->num_rdma_msix); | |
940b61af AV |
3635 | } |
3636 | } | |
3637 | ||
3638 | return v_actual; | |
3639 | ||
3640 | msix_err: | |
4015d11e | 3641 | devm_kfree(dev, pf->msix_entries); |
940b61af AV |
3642 | goto exit_err; |
3643 | ||
152b978a | 3644 | no_hw_vecs_left_err: |
19cce2c6 | 3645 | dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", |
152b978a AV |
3646 | needed, v_left); |
3647 | err = -ERANGE; | |
940b61af | 3648 | exit_err: |
d25a0fc4 | 3649 | pf->num_rdma_msix = 0; |
940b61af | 3650 | pf->num_lan_msix = 0; |
940b61af AV |
3651 | return err; |
3652 | } | |
3653 | ||
3654 | /** | |
3655 | * ice_dis_msix - Disable MSI-X interrupt setup in OS | |
3656 | * @pf: board private structure | |
3657 | */ | |
3658 | static void ice_dis_msix(struct ice_pf *pf) | |
3659 | { | |
3660 | pci_disable_msix(pf->pdev); | |
4015d11e | 3661 | devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); |
940b61af | 3662 | pf->msix_entries = NULL; |
940b61af AV |
3663 | } |
3664 | ||
eb0208ec PB |
3665 | /** |
3666 | * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme | |
3667 | * @pf: board private structure | |
3668 | */ | |
3669 | static void ice_clear_interrupt_scheme(struct ice_pf *pf) | |
3670 | { | |
ba880734 | 3671 | ice_dis_msix(pf); |
eb0208ec | 3672 | |
cbe66bfe | 3673 | if (pf->irq_tracker) { |
4015d11e | 3674 | devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); |
cbe66bfe | 3675 | pf->irq_tracker = NULL; |
eb0208ec PB |
3676 | } |
3677 | } | |
3678 | ||
940b61af AV |
3679 | /** |
3680 | * ice_init_interrupt_scheme - Determine proper interrupt scheme | |
3681 | * @pf: board private structure to initialize | |
3682 | */ | |
3683 | static int ice_init_interrupt_scheme(struct ice_pf *pf) | |
3684 | { | |
cbe66bfe | 3685 | int vectors; |
940b61af | 3686 | |
ba880734 | 3687 | vectors = ice_ena_msix_range(pf); |
940b61af AV |
3688 | |
3689 | if (vectors < 0) | |
3690 | return vectors; | |
3691 | ||
3692 | /* set up vector assignment tracking */ | |
e94c0df9 GS |
3693 | pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), |
3694 | struct_size(pf->irq_tracker, list, vectors), | |
3695 | GFP_KERNEL); | |
cbe66bfe | 3696 | if (!pf->irq_tracker) { |
940b61af AV |
3697 | ice_dis_msix(pf); |
3698 | return -ENOMEM; | |
3699 | } | |
3700 | ||
eb0208ec | 3701 | /* populate SW interrupts pool with number of OS granted IRQs. */ |
88865fc4 KK |
3702 | pf->num_avail_sw_msix = (u16)vectors; |
3703 | pf->irq_tracker->num_entries = (u16)vectors; | |
cbe66bfe | 3704 | pf->irq_tracker->end = pf->irq_tracker->num_entries; |
eb0208ec PB |
3705 | |
3706 | return 0; | |
940b61af AV |
3707 | } |
3708 | ||
769c500d | 3709 | /** |
31765519 AV |
3710 | * ice_is_wol_supported - check if WoL is supported |
3711 | * @hw: pointer to hardware info | |
769c500d AA |
3712 | * |
3713 | * Check if WoL is supported based on the HW configuration. | |
3714 | * Returns true if NVM supports and enables WoL for this port, false otherwise | |
3715 | */ | |
31765519 | 3716 | bool ice_is_wol_supported(struct ice_hw *hw) |
769c500d | 3717 | { |
769c500d AA |
3718 | u16 wol_ctrl; |
3719 | ||
3720 | /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control | |
3721 | * word) indicates WoL is not supported on the corresponding PF ID. | |
3722 | */ | |
3723 | if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) | |
3724 | return false; | |
3725 | ||
31765519 | 3726 | return !(BIT(hw->port_info->lport) & wol_ctrl); |
769c500d AA |
3727 | } |
3728 | ||
87324e74 HT |
3729 | /** |
3730 | * ice_vsi_recfg_qs - Change the number of queues on a VSI | |
3731 | * @vsi: VSI being changed | |
3732 | * @new_rx: new number of Rx queues | |
3733 | * @new_tx: new number of Tx queues | |
3734 | * | |
3735 | * Only change the number of queues if new_tx, or new_rx is non-0. | |
3736 | * | |
3737 | * Returns 0 on success. | |
3738 | */ | |
3739 | int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) | |
3740 | { | |
3741 | struct ice_pf *pf = vsi->back; | |
3742 | int err = 0, timeout = 50; | |
3743 | ||
3744 | if (!new_rx && !new_tx) | |
3745 | return -EINVAL; | |
3746 | ||
7e408e07 | 3747 | while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { |
87324e74 HT |
3748 | timeout--; |
3749 | if (!timeout) | |
3750 | return -EBUSY; | |
3751 | usleep_range(1000, 2000); | |
3752 | } | |
3753 | ||
3754 | if (new_tx) | |
88865fc4 | 3755 | vsi->req_txq = (u16)new_tx; |
87324e74 | 3756 | if (new_rx) |
88865fc4 | 3757 | vsi->req_rxq = (u16)new_rx; |
87324e74 HT |
3758 | |
3759 | /* set for the next time the netdev is started */ | |
3760 | if (!netif_running(vsi->netdev)) { | |
3761 | ice_vsi_rebuild(vsi, false); | |
3762 | dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); | |
3763 | goto done; | |
3764 | } | |
3765 | ||
3766 | ice_vsi_close(vsi); | |
3767 | ice_vsi_rebuild(vsi, false); | |
3768 | ice_pf_dcb_recfg(pf); | |
3769 | ice_vsi_open(vsi); | |
3770 | done: | |
7e408e07 | 3771 | clear_bit(ICE_CFG_BUSY, pf->state); |
87324e74 HT |
3772 | return err; |
3773 | } | |
3774 | ||
cd1f56f4 BC |
3775 | /** |
3776 | * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode | |
3777 | * @pf: PF to configure | |
3778 | * | |
3779 | * No VLAN offloads/filtering are advertised in safe mode so make sure the PF | |
3780 | * VSI can still Tx/Rx VLAN tagged packets. | |
3781 | */ | |
3782 | static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) | |
3783 | { | |
3784 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
3785 | struct ice_vsi_ctx *ctxt; | |
3786 | enum ice_status status; | |
3787 | struct ice_hw *hw; | |
3788 | ||
3789 | if (!vsi) | |
3790 | return; | |
3791 | ||
3792 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); | |
3793 | if (!ctxt) | |
3794 | return; | |
3795 | ||
3796 | hw = &pf->hw; | |
3797 | ctxt->info = vsi->info; | |
3798 | ||
3799 | ctxt->info.valid_sections = | |
3800 | cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | | |
3801 | ICE_AQ_VSI_PROP_SECURITY_VALID | | |
3802 | ICE_AQ_VSI_PROP_SW_VALID); | |
3803 | ||
3804 | /* disable VLAN anti-spoof */ | |
3805 | ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << | |
3806 | ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); | |
3807 | ||
3808 | /* disable VLAN pruning and keep all other settings */ | |
3809 | ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; | |
3810 | ||
3811 | /* allow all VLANs on Tx and don't strip on Rx */ | |
3812 | ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | | |
3813 | ICE_AQ_VSI_VLAN_EMOD_NOTHING; | |
3814 | ||
3815 | status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); | |
3816 | if (status) { | |
3817 | dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", | |
3818 | ice_stat_str(status), | |
3819 | ice_aq_str(hw->adminq.sq_last_status)); | |
3820 | } else { | |
3821 | vsi->info.sec_flags = ctxt->info.sec_flags; | |
3822 | vsi->info.sw_flags2 = ctxt->info.sw_flags2; | |
3823 | vsi->info.vlan_flags = ctxt->info.vlan_flags; | |
3824 | } | |
3825 | ||
3826 | kfree(ctxt); | |
3827 | } | |
3828 | ||
462acf6a TN |
3829 | /** |
3830 | * ice_log_pkg_init - log result of DDP package load | |
3831 | * @hw: pointer to hardware info | |
3832 | * @status: status of package load | |
3833 | */ | |
3834 | static void | |
3835 | ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) | |
3836 | { | |
3837 | struct ice_pf *pf = (struct ice_pf *)hw->back; | |
4015d11e | 3838 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a TN |
3839 | |
3840 | switch (*status) { | |
3841 | case ICE_SUCCESS: | |
3842 | /* The package download AdminQ command returned success because | |
3843 | * this download succeeded or ICE_ERR_AQ_NO_WORK since there is | |
3844 | * already a package loaded on the device. | |
3845 | */ | |
3846 | if (hw->pkg_ver.major == hw->active_pkg_ver.major && | |
3847 | hw->pkg_ver.minor == hw->active_pkg_ver.minor && | |
3848 | hw->pkg_ver.update == hw->active_pkg_ver.update && | |
3849 | hw->pkg_ver.draft == hw->active_pkg_ver.draft && | |
3850 | !memcmp(hw->pkg_name, hw->active_pkg_name, | |
3851 | sizeof(hw->pkg_name))) { | |
3852 | if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST) | |
19cce2c6 | 3853 | dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n", |
462acf6a TN |
3854 | hw->active_pkg_name, |
3855 | hw->active_pkg_ver.major, | |
3856 | hw->active_pkg_ver.minor, | |
3857 | hw->active_pkg_ver.update, | |
3858 | hw->active_pkg_ver.draft); | |
3859 | else | |
19cce2c6 | 3860 | dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n", |
462acf6a TN |
3861 | hw->active_pkg_name, |
3862 | hw->active_pkg_ver.major, | |
3863 | hw->active_pkg_ver.minor, | |
3864 | hw->active_pkg_ver.update, | |
3865 | hw->active_pkg_ver.draft); | |
3866 | } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || | |
3867 | hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { | |
19cce2c6 | 3868 | dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", |
462acf6a TN |
3869 | hw->active_pkg_name, |
3870 | hw->active_pkg_ver.major, | |
3871 | hw->active_pkg_ver.minor, | |
3872 | ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); | |
3873 | *status = ICE_ERR_NOT_SUPPORTED; | |
3874 | } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && | |
3875 | hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { | |
19cce2c6 | 3876 | dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n", |
462acf6a TN |
3877 | hw->active_pkg_name, |
3878 | hw->active_pkg_ver.major, | |
3879 | hw->active_pkg_ver.minor, | |
3880 | hw->active_pkg_ver.update, | |
3881 | hw->active_pkg_ver.draft, | |
3882 | hw->pkg_name, | |
3883 | hw->pkg_ver.major, | |
3884 | hw->pkg_ver.minor, | |
3885 | hw->pkg_ver.update, | |
3886 | hw->pkg_ver.draft); | |
3887 | } else { | |
19cce2c6 | 3888 | dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n"); |
462acf6a TN |
3889 | *status = ICE_ERR_NOT_SUPPORTED; |
3890 | } | |
3891 | break; | |
b8272919 VR |
3892 | case ICE_ERR_FW_DDP_MISMATCH: |
3893 | dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); | |
3894 | break; | |
462acf6a | 3895 | case ICE_ERR_BUF_TOO_SHORT: |
462acf6a | 3896 | case ICE_ERR_CFG: |
19cce2c6 | 3897 | dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n"); |
462acf6a TN |
3898 | break; |
3899 | case ICE_ERR_NOT_SUPPORTED: | |
3900 | /* Package File version not supported */ | |
3901 | if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ || | |
3902 | (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && | |
3903 | hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR)) | |
19cce2c6 | 3904 | dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n"); |
462acf6a TN |
3905 | else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ || |
3906 | (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && | |
3907 | hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR)) | |
19cce2c6 | 3908 | dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n", |
462acf6a TN |
3909 | ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); |
3910 | break; | |
3911 | case ICE_ERR_AQ_ERROR: | |
e000248e | 3912 | switch (hw->pkg_dwnld_status) { |
462acf6a TN |
3913 | case ICE_AQ_RC_ENOSEC: |
3914 | case ICE_AQ_RC_EBADSIG: | |
19cce2c6 | 3915 | dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n"); |
462acf6a TN |
3916 | return; |
3917 | case ICE_AQ_RC_ESVN: | |
19cce2c6 | 3918 | dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n"); |
462acf6a TN |
3919 | return; |
3920 | case ICE_AQ_RC_EBADMAN: | |
3921 | case ICE_AQ_RC_EBADBUF: | |
19cce2c6 | 3922 | dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n"); |
9918f2d2 AV |
3923 | /* poll for reset to complete */ |
3924 | if (ice_check_reset(hw)) | |
3925 | dev_err(dev, "Error resetting device. Please reload the driver\n"); | |
462acf6a TN |
3926 | return; |
3927 | default: | |
3928 | break; | |
3929 | } | |
4e83fc93 | 3930 | fallthrough; |
462acf6a | 3931 | default: |
19cce2c6 | 3932 | dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n", |
462acf6a TN |
3933 | *status); |
3934 | break; | |
3935 | } | |
3936 | } | |
3937 | ||
3938 | /** | |
3939 | * ice_load_pkg - load/reload the DDP Package file | |
3940 | * @firmware: firmware structure when firmware requested or NULL for reload | |
3941 | * @pf: pointer to the PF instance | |
3942 | * | |
3943 | * Called on probe and post CORER/GLOBR rebuild to load DDP Package and | |
3944 | * initialize HW tables. | |
3945 | */ | |
3946 | static void | |
3947 | ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) | |
3948 | { | |
3949 | enum ice_status status = ICE_ERR_PARAM; | |
4015d11e | 3950 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a TN |
3951 | struct ice_hw *hw = &pf->hw; |
3952 | ||
3953 | /* Load DDP Package */ | |
3954 | if (firmware && !hw->pkg_copy) { | |
3955 | status = ice_copy_and_init_pkg(hw, firmware->data, | |
3956 | firmware->size); | |
3957 | ice_log_pkg_init(hw, &status); | |
3958 | } else if (!firmware && hw->pkg_copy) { | |
3959 | /* Reload package during rebuild after CORER/GLOBR reset */ | |
3960 | status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); | |
3961 | ice_log_pkg_init(hw, &status); | |
3962 | } else { | |
19cce2c6 | 3963 | dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n"); |
462acf6a TN |
3964 | } |
3965 | ||
3966 | if (status) { | |
3967 | /* Safe Mode */ | |
3968 | clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); | |
3969 | return; | |
3970 | } | |
3971 | ||
3972 | /* Successful download package is the precondition for advanced | |
3973 | * features, hence setting the ICE_FLAG_ADV_FEATURES flag | |
3974 | */ | |
3975 | set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); | |
3976 | } | |
3977 | ||
c585ea42 BC |
3978 | /** |
3979 | * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines | |
3980 | * @pf: pointer to the PF structure | |
3981 | * | |
3982 | * There is no error returned here because the driver should be able to handle | |
3983 | * 128 Byte cache lines, so we only print a warning in case issues are seen, | |
3984 | * specifically with Tx. | |
3985 | */ | |
3986 | static void ice_verify_cacheline_size(struct ice_pf *pf) | |
3987 | { | |
3988 | if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) | |
19cce2c6 | 3989 | dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", |
c585ea42 BC |
3990 | ICE_CACHE_LINE_BYTES); |
3991 | } | |
3992 | ||
e3710a01 PSJ |
3993 | /** |
3994 | * ice_send_version - update firmware with driver version | |
3995 | * @pf: PF struct | |
3996 | * | |
3997 | * Returns ICE_SUCCESS on success, else error code | |
3998 | */ | |
3999 | static enum ice_status ice_send_version(struct ice_pf *pf) | |
4000 | { | |
4001 | struct ice_driver_ver dv; | |
4002 | ||
34a2a3b8 JK |
4003 | dv.major_ver = 0xff; |
4004 | dv.minor_ver = 0xff; | |
4005 | dv.build_ver = 0xff; | |
e3710a01 | 4006 | dv.subbuild_ver = 0; |
34a2a3b8 | 4007 | strscpy((char *)dv.driver_string, UTS_RELEASE, |
e3710a01 PSJ |
4008 | sizeof(dv.driver_string)); |
4009 | return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); | |
4010 | } | |
4011 | ||
148beb61 HT |
4012 | /** |
4013 | * ice_init_fdir - Initialize flow director VSI and configuration | |
4014 | * @pf: pointer to the PF instance | |
4015 | * | |
4016 | * returns 0 on success, negative on error | |
4017 | */ | |
4018 | static int ice_init_fdir(struct ice_pf *pf) | |
4019 | { | |
4020 | struct device *dev = ice_pf_to_dev(pf); | |
4021 | struct ice_vsi *ctrl_vsi; | |
4022 | int err; | |
4023 | ||
4024 | /* Side Band Flow Director needs to have a control VSI. | |
4025 | * Allocate it and store it in the PF. | |
4026 | */ | |
4027 | ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); | |
4028 | if (!ctrl_vsi) { | |
4029 | dev_dbg(dev, "could not create control VSI\n"); | |
4030 | return -ENOMEM; | |
4031 | } | |
4032 | ||
4033 | err = ice_vsi_open_ctrl(ctrl_vsi); | |
4034 | if (err) { | |
4035 | dev_dbg(dev, "could not open control VSI\n"); | |
4036 | goto err_vsi_open; | |
4037 | } | |
4038 | ||
4039 | mutex_init(&pf->hw.fdir_fltr_lock); | |
4040 | ||
4041 | err = ice_fdir_create_dflt_rules(pf); | |
4042 | if (err) | |
4043 | goto err_fdir_rule; | |
4044 | ||
4045 | return 0; | |
4046 | ||
4047 | err_fdir_rule: | |
4048 | ice_fdir_release_flows(&pf->hw); | |
4049 | ice_vsi_close(ctrl_vsi); | |
4050 | err_vsi_open: | |
4051 | ice_vsi_release(ctrl_vsi); | |
4052 | if (pf->ctrl_vsi_idx != ICE_NO_VSI) { | |
4053 | pf->vsi[pf->ctrl_vsi_idx] = NULL; | |
4054 | pf->ctrl_vsi_idx = ICE_NO_VSI; | |
4055 | } | |
4056 | return err; | |
4057 | } | |
4058 | ||
462acf6a TN |
4059 | /** |
4060 | * ice_get_opt_fw_name - return optional firmware file name or NULL | |
4061 | * @pf: pointer to the PF instance | |
4062 | */ | |
4063 | static char *ice_get_opt_fw_name(struct ice_pf *pf) | |
4064 | { | |
4065 | /* Optional firmware name same as default with additional dash | |
4066 | * followed by a EUI-64 identifier (PCIe Device Serial Number) | |
4067 | */ | |
4068 | struct pci_dev *pdev = pf->pdev; | |
ceb2f007 JK |
4069 | char *opt_fw_filename; |
4070 | u64 dsn; | |
462acf6a TN |
4071 | |
4072 | /* Determine the name of the optional file using the DSN (two | |
4073 | * dwords following the start of the DSN Capability). | |
4074 | */ | |
ceb2f007 JK |
4075 | dsn = pci_get_dsn(pdev); |
4076 | if (!dsn) | |
4077 | return NULL; | |
4078 | ||
4079 | opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL); | |
4080 | if (!opt_fw_filename) | |
4081 | return NULL; | |
4082 | ||
1a9c561a | 4083 | snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", |
ceb2f007 | 4084 | ICE_DDP_PKG_PATH, dsn); |
462acf6a TN |
4085 | |
4086 | return opt_fw_filename; | |
4087 | } | |
4088 | ||
4089 | /** | |
4090 | * ice_request_fw - Device initialization routine | |
4091 | * @pf: pointer to the PF instance | |
4092 | */ | |
4093 | static void ice_request_fw(struct ice_pf *pf) | |
4094 | { | |
4095 | char *opt_fw_filename = ice_get_opt_fw_name(pf); | |
4096 | const struct firmware *firmware = NULL; | |
4015d11e | 4097 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a TN |
4098 | int err = 0; |
4099 | ||
4100 | /* optional device-specific DDP (if present) overrides the default DDP | |
4101 | * package file. kernel logs a debug message if the file doesn't exist, | |
4102 | * and warning messages for other errors. | |
4103 | */ | |
4104 | if (opt_fw_filename) { | |
4105 | err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); | |
4106 | if (err) { | |
4107 | kfree(opt_fw_filename); | |
4108 | goto dflt_pkg_load; | |
4109 | } | |
4110 | ||
4111 | /* request for firmware was successful. Download to device */ | |
4112 | ice_load_pkg(firmware, pf); | |
4113 | kfree(opt_fw_filename); | |
4114 | release_firmware(firmware); | |
4115 | return; | |
4116 | } | |
4117 | ||
4118 | dflt_pkg_load: | |
4119 | err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); | |
4120 | if (err) { | |
19cce2c6 | 4121 | dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); |
462acf6a TN |
4122 | return; |
4123 | } | |
4124 | ||
4125 | /* request for firmware was successful. Download to device */ | |
4126 | ice_load_pkg(firmware, pf); | |
4127 | release_firmware(firmware); | |
4128 | } | |
4129 | ||
769c500d AA |
4130 | /** |
4131 | * ice_print_wake_reason - show the wake up cause in the log | |
4132 | * @pf: pointer to the PF struct | |
4133 | */ | |
4134 | static void ice_print_wake_reason(struct ice_pf *pf) | |
4135 | { | |
4136 | u32 wus = pf->wakeup_reason; | |
4137 | const char *wake_str; | |
4138 | ||
4139 | /* if no wake event, nothing to print */ | |
4140 | if (!wus) | |
4141 | return; | |
4142 | ||
4143 | if (wus & PFPM_WUS_LNKC_M) | |
4144 | wake_str = "Link\n"; | |
4145 | else if (wus & PFPM_WUS_MAG_M) | |
4146 | wake_str = "Magic Packet\n"; | |
4147 | else if (wus & PFPM_WUS_MNG_M) | |
4148 | wake_str = "Management\n"; | |
4149 | else if (wus & PFPM_WUS_FW_RST_WK_M) | |
4150 | wake_str = "Firmware Reset\n"; | |
4151 | else | |
4152 | wake_str = "Unknown\n"; | |
4153 | ||
4154 | dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); | |
4155 | } | |
4156 | ||
1e23f076 AV |
4157 | /** |
4158 | * ice_register_netdev - register netdev and devlink port | |
4159 | * @pf: pointer to the PF struct | |
4160 | */ | |
4161 | static int ice_register_netdev(struct ice_pf *pf) | |
4162 | { | |
4163 | struct ice_vsi *vsi; | |
4164 | int err = 0; | |
4165 | ||
4166 | vsi = ice_get_main_vsi(pf); | |
4167 | if (!vsi || !vsi->netdev) | |
4168 | return -EIO; | |
4169 | ||
4170 | err = register_netdev(vsi->netdev); | |
4171 | if (err) | |
4172 | goto err_register_netdev; | |
4173 | ||
a476d72a | 4174 | set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); |
1e23f076 AV |
4175 | netif_carrier_off(vsi->netdev); |
4176 | netif_tx_stop_all_queues(vsi->netdev); | |
4177 | err = ice_devlink_create_port(vsi); | |
4178 | if (err) | |
4179 | goto err_devlink_create; | |
4180 | ||
4181 | devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev); | |
4182 | ||
4183 | return 0; | |
4184 | err_devlink_create: | |
4185 | unregister_netdev(vsi->netdev); | |
a476d72a | 4186 | clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); |
1e23f076 AV |
4187 | err_register_netdev: |
4188 | free_netdev(vsi->netdev); | |
4189 | vsi->netdev = NULL; | |
a476d72a | 4190 | clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); |
1e23f076 AV |
4191 | return err; |
4192 | } | |
4193 | ||
837f08fd AV |
4194 | /** |
4195 | * ice_probe - Device initialization routine | |
4196 | * @pdev: PCI device information struct | |
4197 | * @ent: entry in ice_pci_tbl | |
4198 | * | |
4199 | * Returns 0 on success, negative on failure | |
4200 | */ | |
c8b7abdd BA |
4201 | static int |
4202 | ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) | |
837f08fd | 4203 | { |
77ed84f4 | 4204 | struct device *dev = &pdev->dev; |
837f08fd AV |
4205 | struct ice_pf *pf; |
4206 | struct ice_hw *hw; | |
b20e6c17 | 4207 | int i, err; |
837f08fd | 4208 | |
50ac7479 AV |
4209 | if (pdev->is_virtfn) { |
4210 | dev_err(dev, "can't probe a virtual function\n"); | |
4211 | return -EINVAL; | |
4212 | } | |
4213 | ||
4ee656bb TN |
4214 | /* this driver uses devres, see |
4215 | * Documentation/driver-api/driver-model/devres.rst | |
4216 | */ | |
837f08fd AV |
4217 | err = pcim_enable_device(pdev); |
4218 | if (err) | |
4219 | return err; | |
4220 | ||
80ad6dde | 4221 | err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev)); |
837f08fd | 4222 | if (err) { |
77ed84f4 | 4223 | dev_err(dev, "BAR0 I/O map error %d\n", err); |
837f08fd AV |
4224 | return err; |
4225 | } | |
4226 | ||
1adf7ead | 4227 | pf = ice_allocate_pf(dev); |
837f08fd AV |
4228 | if (!pf) |
4229 | return -ENOMEM; | |
4230 | ||
2f2da36e | 4231 | /* set up for high or low DMA */ |
77ed84f4 | 4232 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
837f08fd | 4233 | if (err) |
77ed84f4 | 4234 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
837f08fd | 4235 | if (err) { |
77ed84f4 | 4236 | dev_err(dev, "DMA configuration failed: 0x%x\n", err); |
837f08fd AV |
4237 | return err; |
4238 | } | |
4239 | ||
4240 | pci_enable_pcie_error_reporting(pdev); | |
4241 | pci_set_master(pdev); | |
4242 | ||
4243 | pf->pdev = pdev; | |
4244 | pci_set_drvdata(pdev, pf); | |
7e408e07 | 4245 | set_bit(ICE_DOWN, pf->state); |
8d81fa55 | 4246 | /* Disable service task until DOWN bit is cleared */ |
7e408e07 | 4247 | set_bit(ICE_SERVICE_DIS, pf->state); |
837f08fd AV |
4248 | |
4249 | hw = &pf->hw; | |
4250 | hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; | |
4e56802e MS |
4251 | pci_save_state(pdev); |
4252 | ||
837f08fd AV |
4253 | hw->back = pf; |
4254 | hw->vendor_id = pdev->vendor; | |
4255 | hw->device_id = pdev->device; | |
4256 | pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); | |
4257 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
4258 | hw->subsystem_device_id = pdev->subsystem_device; | |
4259 | hw->bus.device = PCI_SLOT(pdev->devfn); | |
4260 | hw->bus.func = PCI_FUNC(pdev->devfn); | |
f31e4b6f AV |
4261 | ice_set_ctrlq_len(hw); |
4262 | ||
837f08fd AV |
4263 | pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); |
4264 | ||
7ec59eea AV |
4265 | #ifndef CONFIG_DYNAMIC_DEBUG |
4266 | if (debug < -1) | |
4267 | hw->debug_mask = debug; | |
4268 | #endif | |
4269 | ||
f31e4b6f AV |
4270 | err = ice_init_hw(hw); |
4271 | if (err) { | |
77ed84f4 | 4272 | dev_err(dev, "ice_init_hw failed: %d\n", err); |
f31e4b6f AV |
4273 | err = -EIO; |
4274 | goto err_exit_unroll; | |
4275 | } | |
4276 | ||
40b24760 AV |
4277 | ice_init_feature_support(pf); |
4278 | ||
462acf6a TN |
4279 | ice_request_fw(pf); |
4280 | ||
4281 | /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be | |
4282 | * set in pf->state, which will cause ice_is_safe_mode to return | |
4283 | * true | |
4284 | */ | |
4285 | if (ice_is_safe_mode(pf)) { | |
19cce2c6 | 4286 | dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n"); |
462acf6a TN |
4287 | /* we already got function/device capabilities but these don't |
4288 | * reflect what the driver needs to do in safe mode. Instead of | |
4289 | * adding conditional logic everywhere to ignore these | |
4290 | * device/function capabilities, override them. | |
4291 | */ | |
4292 | ice_set_safe_mode_caps(hw); | |
4293 | } | |
4294 | ||
78b5713a AV |
4295 | err = ice_init_pf(pf); |
4296 | if (err) { | |
4297 | dev_err(dev, "ice_init_pf failed: %d\n", err); | |
4298 | goto err_init_pf_unroll; | |
4299 | } | |
940b61af | 4300 | |
dce730f1 JK |
4301 | ice_devlink_init_regions(pf); |
4302 | ||
b20e6c17 JK |
4303 | pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; |
4304 | pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; | |
4305 | pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; | |
4306 | pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; | |
4307 | i = 0; | |
4308 | if (pf->hw.tnl.valid_count[TNL_VXLAN]) { | |
4309 | pf->hw.udp_tunnel_nic.tables[i].n_entries = | |
4310 | pf->hw.tnl.valid_count[TNL_VXLAN]; | |
4311 | pf->hw.udp_tunnel_nic.tables[i].tunnel_types = | |
4312 | UDP_TUNNEL_TYPE_VXLAN; | |
4313 | i++; | |
4314 | } | |
4315 | if (pf->hw.tnl.valid_count[TNL_GENEVE]) { | |
4316 | pf->hw.udp_tunnel_nic.tables[i].n_entries = | |
4317 | pf->hw.tnl.valid_count[TNL_GENEVE]; | |
4318 | pf->hw.udp_tunnel_nic.tables[i].tunnel_types = | |
4319 | UDP_TUNNEL_TYPE_GENEVE; | |
4320 | i++; | |
4321 | } | |
4322 | ||
995c90f2 | 4323 | pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; |
940b61af AV |
4324 | if (!pf->num_alloc_vsi) { |
4325 | err = -EIO; | |
4326 | goto err_init_pf_unroll; | |
4327 | } | |
b20e6c17 JK |
4328 | if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { |
4329 | dev_warn(&pf->pdev->dev, | |
4330 | "limiting the VSI count due to UDP tunnel limitation %d > %d\n", | |
4331 | pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); | |
4332 | pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; | |
4333 | } | |
940b61af | 4334 | |
77ed84f4 BA |
4335 | pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), |
4336 | GFP_KERNEL); | |
940b61af AV |
4337 | if (!pf->vsi) { |
4338 | err = -ENOMEM; | |
4339 | goto err_init_pf_unroll; | |
4340 | } | |
4341 | ||
4342 | err = ice_init_interrupt_scheme(pf); | |
4343 | if (err) { | |
77ed84f4 | 4344 | dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); |
940b61af | 4345 | err = -EIO; |
bc3a0241 | 4346 | goto err_init_vsi_unroll; |
940b61af AV |
4347 | } |
4348 | ||
4349 | /* In case of MSIX we are going to setup the misc vector right here | |
4350 | * to handle admin queue events etc. In case of legacy and MSI | |
4351 | * the misc functionality and queue processing is combined in | |
4352 | * the same vector and that gets setup at open. | |
4353 | */ | |
ba880734 BC |
4354 | err = ice_req_irq_msix_misc(pf); |
4355 | if (err) { | |
4356 | dev_err(dev, "setup of misc vector failed: %d\n", err); | |
4357 | goto err_init_interrupt_unroll; | |
940b61af AV |
4358 | } |
4359 | ||
4360 | /* create switch struct for the switch element created by FW on boot */ | |
77ed84f4 | 4361 | pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL); |
940b61af AV |
4362 | if (!pf->first_sw) { |
4363 | err = -ENOMEM; | |
4364 | goto err_msix_misc_unroll; | |
4365 | } | |
4366 | ||
b1edc14a MFIP |
4367 | if (hw->evb_veb) |
4368 | pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; | |
4369 | else | |
4370 | pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; | |
4371 | ||
940b61af AV |
4372 | pf->first_sw->pf = pf; |
4373 | ||
4374 | /* record the sw_id available for later use */ | |
4375 | pf->first_sw->sw_id = hw->port_info->sw_id; | |
4376 | ||
3a858ba3 AV |
4377 | err = ice_setup_pf_sw(pf); |
4378 | if (err) { | |
4015d11e | 4379 | dev_err(dev, "probe failed due to setup PF switch: %d\n", err); |
3a858ba3 AV |
4380 | goto err_alloc_sw_unroll; |
4381 | } | |
9daf8208 | 4382 | |
7e408e07 | 4383 | clear_bit(ICE_SERVICE_DIS, pf->state); |
9daf8208 | 4384 | |
e3710a01 PSJ |
4385 | /* tell the firmware we are up */ |
4386 | err = ice_send_version(pf); | |
4387 | if (err) { | |
19cce2c6 | 4388 | dev_err(dev, "probe failed sending driver version %s. error: %d\n", |
34a2a3b8 | 4389 | UTS_RELEASE, err); |
78116e97 | 4390 | goto err_send_version_unroll; |
e3710a01 PSJ |
4391 | } |
4392 | ||
9daf8208 AV |
4393 | /* since everything is good, start the service timer */ |
4394 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | |
4395 | ||
250c3b3e BC |
4396 | err = ice_init_link_events(pf->hw.port_info); |
4397 | if (err) { | |
4398 | dev_err(dev, "ice_init_link_events failed: %d\n", err); | |
78116e97 | 4399 | goto err_send_version_unroll; |
250c3b3e BC |
4400 | } |
4401 | ||
08771bce | 4402 | /* not a fatal error if this fails */ |
1a3571b5 | 4403 | err = ice_init_nvm_phy_type(pf->hw.port_info); |
08771bce | 4404 | if (err) |
1a3571b5 | 4405 | dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); |
1a3571b5 | 4406 | |
08771bce | 4407 | /* not a fatal error if this fails */ |
1a3571b5 | 4408 | err = ice_update_link_info(pf->hw.port_info); |
08771bce | 4409 | if (err) |
1a3571b5 | 4410 | dev_err(dev, "ice_update_link_info failed: %d\n", err); |
1a3571b5 | 4411 | |
ea78ce4d PG |
4412 | ice_init_link_dflt_override(pf->hw.port_info); |
4413 | ||
c77849f5 AV |
4414 | ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err); |
4415 | ||
1a3571b5 PG |
4416 | /* if media available, initialize PHY settings */ |
4417 | if (pf->hw.port_info->phy.link_info.link_info & | |
4418 | ICE_AQ_MEDIA_AVAILABLE) { | |
08771bce | 4419 | /* not a fatal error if this fails */ |
1a3571b5 | 4420 | err = ice_init_phy_user_cfg(pf->hw.port_info); |
08771bce | 4421 | if (err) |
1a3571b5 | 4422 | dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); |
1a3571b5 PG |
4423 | |
4424 | if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { | |
4425 | struct ice_vsi *vsi = ice_get_main_vsi(pf); | |
4426 | ||
4427 | if (vsi) | |
4428 | ice_configure_phy(vsi); | |
4429 | } | |
4430 | } else { | |
4431 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); | |
4432 | } | |
4433 | ||
c585ea42 BC |
4434 | ice_verify_cacheline_size(pf); |
4435 | ||
769c500d AA |
4436 | /* Save wakeup reason register for later use */ |
4437 | pf->wakeup_reason = rd32(hw, PFPM_WUS); | |
4438 | ||
4439 | /* check for a power management event */ | |
4440 | ice_print_wake_reason(pf); | |
4441 | ||
4442 | /* clear wake status, all bits */ | |
4443 | wr32(hw, PFPM_WUS, U32_MAX); | |
4444 | ||
4445 | /* Disable WoL at init, wait for user to enable */ | |
4446 | device_set_wakeup_enable(dev, false); | |
4447 | ||
cd1f56f4 BC |
4448 | if (ice_is_safe_mode(pf)) { |
4449 | ice_set_safe_mode_vlan_cfg(pf); | |
de75135b | 4450 | goto probe_done; |
cd1f56f4 | 4451 | } |
462acf6a TN |
4452 | |
4453 | /* initialize DDP driven features */ | |
06c16d89 JK |
4454 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) |
4455 | ice_ptp_init(pf); | |
462acf6a | 4456 | |
148beb61 HT |
4457 | /* Note: Flow director init failure is non-fatal to load */ |
4458 | if (ice_init_fdir(pf)) | |
4459 | dev_err(dev, "could not initialize flow director\n"); | |
4460 | ||
462acf6a TN |
4461 | /* Note: DCB init failure is non-fatal to load */ |
4462 | if (ice_init_pf_dcb(pf, false)) { | |
4463 | clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); | |
4464 | clear_bit(ICE_FLAG_DCB_ENA, pf->flags); | |
4465 | } else { | |
4466 | ice_cfg_lldp_mib_change(&pf->hw, true); | |
4467 | } | |
4468 | ||
df006dd4 DE |
4469 | if (ice_init_lag(pf)) |
4470 | dev_warn(dev, "Failed to init link aggregation support\n"); | |
4471 | ||
e18ff118 PG |
4472 | /* print PCI link speed and width */ |
4473 | pcie_print_link_status(pf->pdev); | |
4474 | ||
de75135b | 4475 | probe_done: |
1e23f076 AV |
4476 | err = ice_register_netdev(pf); |
4477 | if (err) | |
4478 | goto err_netdev_reg; | |
4479 | ||
de75135b | 4480 | /* ready to go, so clear down state bit */ |
7e408e07 | 4481 | clear_bit(ICE_DOWN, pf->state); |
d25a0fc4 DE |
4482 | if (ice_is_aux_ena(pf)) { |
4483 | pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL); | |
4484 | if (pf->aux_idx < 0) { | |
4485 | dev_err(dev, "Failed to allocate device ID for AUX driver\n"); | |
4486 | err = -ENOMEM; | |
4487 | goto err_netdev_reg; | |
4488 | } | |
4489 | ||
4490 | err = ice_init_rdma(pf); | |
4491 | if (err) { | |
4492 | dev_err(dev, "Failed to initialize RDMA: %d\n", err); | |
4493 | err = -EIO; | |
4494 | goto err_init_aux_unroll; | |
4495 | } | |
4496 | } else { | |
4497 | dev_warn(dev, "RDMA is not supported on this device\n"); | |
4498 | } | |
4499 | ||
838cefd5 | 4500 | ice_devlink_register(pf); |
837f08fd | 4501 | return 0; |
f31e4b6f | 4502 | |
d25a0fc4 DE |
4503 | err_init_aux_unroll: |
4504 | pf->adev = NULL; | |
4505 | ida_free(&ice_aux_ida, pf->aux_idx); | |
1e23f076 | 4506 | err_netdev_reg: |
78116e97 MS |
4507 | err_send_version_unroll: |
4508 | ice_vsi_release_all(pf); | |
3a858ba3 | 4509 | err_alloc_sw_unroll: |
7e408e07 AV |
4510 | set_bit(ICE_SERVICE_DIS, pf->state); |
4511 | set_bit(ICE_DOWN, pf->state); | |
4015d11e | 4512 | devm_kfree(dev, pf->first_sw); |
940b61af AV |
4513 | err_msix_misc_unroll: |
4514 | ice_free_irq_msix_misc(pf); | |
4515 | err_init_interrupt_unroll: | |
4516 | ice_clear_interrupt_scheme(pf); | |
bc3a0241 | 4517 | err_init_vsi_unroll: |
77ed84f4 | 4518 | devm_kfree(dev, pf->vsi); |
940b61af AV |
4519 | err_init_pf_unroll: |
4520 | ice_deinit_pf(pf); | |
dce730f1 | 4521 | ice_devlink_destroy_regions(pf); |
940b61af | 4522 | ice_deinit_hw(hw); |
f31e4b6f AV |
4523 | err_exit_unroll: |
4524 | pci_disable_pcie_error_reporting(pdev); | |
769c500d | 4525 | pci_disable_device(pdev); |
f31e4b6f | 4526 | return err; |
837f08fd AV |
4527 | } |
4528 | ||
769c500d AA |
4529 | /** |
4530 | * ice_set_wake - enable or disable Wake on LAN | |
4531 | * @pf: pointer to the PF struct | |
4532 | * | |
4533 | * Simple helper for WoL control | |
4534 | */ | |
4535 | static void ice_set_wake(struct ice_pf *pf) | |
4536 | { | |
4537 | struct ice_hw *hw = &pf->hw; | |
4538 | bool wol = pf->wol_ena; | |
4539 | ||
4540 | /* clear wake state, otherwise new wake events won't fire */ | |
4541 | wr32(hw, PFPM_WUS, U32_MAX); | |
4542 | ||
4543 | /* enable / disable APM wake up, no RMW needed */ | |
4544 | wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); | |
4545 | ||
4546 | /* set magic packet filter enabled */ | |
4547 | wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); | |
4548 | } | |
4549 | ||
4550 | /** | |
ef860480 | 4551 | * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet |
769c500d AA |
4552 | * @pf: pointer to the PF struct |
4553 | * | |
4554 | * Issue firmware command to enable multicast magic wake, making | |
4555 | * sure that any locally administered address (LAA) is used for | |
4556 | * wake, and that PF reset doesn't undo the LAA. | |
4557 | */ | |
4558 | static void ice_setup_mc_magic_wake(struct ice_pf *pf) | |
4559 | { | |
4560 | struct device *dev = ice_pf_to_dev(pf); | |
4561 | struct ice_hw *hw = &pf->hw; | |
4562 | enum ice_status status; | |
4563 | u8 mac_addr[ETH_ALEN]; | |
4564 | struct ice_vsi *vsi; | |
4565 | u8 flags; | |
4566 | ||
4567 | if (!pf->wol_ena) | |
4568 | return; | |
4569 | ||
4570 | vsi = ice_get_main_vsi(pf); | |
4571 | if (!vsi) | |
4572 | return; | |
4573 | ||
4574 | /* Get current MAC address in case it's an LAA */ | |
4575 | if (vsi->netdev) | |
4576 | ether_addr_copy(mac_addr, vsi->netdev->dev_addr); | |
4577 | else | |
4578 | ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); | |
4579 | ||
4580 | flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | | |
4581 | ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | | |
4582 | ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; | |
4583 | ||
4584 | status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); | |
4585 | if (status) | |
4586 | dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", | |
4587 | ice_stat_str(status), | |
4588 | ice_aq_str(hw->adminq.sq_last_status)); | |
4589 | } | |
4590 | ||
837f08fd AV |
4591 | /** |
4592 | * ice_remove - Device removal routine | |
4593 | * @pdev: PCI device information struct | |
4594 | */ | |
4595 | static void ice_remove(struct pci_dev *pdev) | |
4596 | { | |
4597 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
81b23589 | 4598 | int i; |
837f08fd | 4599 | |
838cefd5 | 4600 | ice_devlink_unregister(pf); |
afd9d4ab AV |
4601 | for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { |
4602 | if (!ice_is_reset_in_progress(pf->state)) | |
4603 | break; | |
4604 | msleep(100); | |
4605 | } | |
4606 | ||
f844d521 | 4607 | if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { |
7e408e07 | 4608 | set_bit(ICE_VF_RESETS_DISABLED, pf->state); |
f844d521 BC |
4609 | ice_free_vfs(pf); |
4610 | } | |
4611 | ||
8d81fa55 | 4612 | ice_service_task_stop(pf); |
f31e4b6f | 4613 | |
d69ea414 | 4614 | ice_aq_cancel_waiting_tasks(pf); |
f9f5301e | 4615 | ice_unplug_aux_dev(pf); |
d25a0fc4 | 4616 | ida_free(&ice_aux_ida, pf->aux_idx); |
f9f5301e | 4617 | set_bit(ICE_DOWN, pf->state); |
d69ea414 | 4618 | |
148beb61 | 4619 | mutex_destroy(&(&pf->hw)->fdir_fltr_lock); |
df006dd4 | 4620 | ice_deinit_lag(pf); |
06c16d89 JK |
4621 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) |
4622 | ice_ptp_release(pf); | |
28bf2672 BC |
4623 | if (!ice_is_safe_mode(pf)) |
4624 | ice_remove_arfs(pf); | |
769c500d | 4625 | ice_setup_mc_magic_wake(pf); |
0f9d5027 | 4626 | ice_vsi_release_all(pf); |
769c500d | 4627 | ice_set_wake(pf); |
940b61af | 4628 | ice_free_irq_msix_misc(pf); |
81b23589 DE |
4629 | ice_for_each_vsi(pf, i) { |
4630 | if (!pf->vsi[i]) | |
4631 | continue; | |
4632 | ice_vsi_free_q_vectors(pf->vsi[i]); | |
4633 | } | |
940b61af | 4634 | ice_deinit_pf(pf); |
dce730f1 | 4635 | ice_devlink_destroy_regions(pf); |
f31e4b6f | 4636 | ice_deinit_hw(&pf->hw); |
1adf7ead | 4637 | |
18057cb3 BA |
4638 | /* Issue a PFR as part of the prescribed driver unload flow. Do not |
4639 | * do it via ice_schedule_reset() since there is no need to rebuild | |
4640 | * and the service task is already stopped. | |
4641 | */ | |
4642 | ice_reset(&pf->hw, ICE_RESET_PFR); | |
c6012ac1 BA |
4643 | pci_wait_for_pending_transaction(pdev); |
4644 | ice_clear_interrupt_scheme(pf); | |
837f08fd | 4645 | pci_disable_pcie_error_reporting(pdev); |
769c500d AA |
4646 | pci_disable_device(pdev); |
4647 | } | |
4648 | ||
4649 | /** | |
4650 | * ice_shutdown - PCI callback for shutting down device | |
4651 | * @pdev: PCI device information struct | |
4652 | */ | |
4653 | static void ice_shutdown(struct pci_dev *pdev) | |
4654 | { | |
4655 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
4656 | ||
4657 | ice_remove(pdev); | |
4658 | ||
4659 | if (system_state == SYSTEM_POWER_OFF) { | |
4660 | pci_wake_from_d3(pdev, pf->wol_ena); | |
4661 | pci_set_power_state(pdev, PCI_D3hot); | |
4662 | } | |
837f08fd AV |
4663 | } |
4664 | ||
769c500d AA |
4665 | #ifdef CONFIG_PM |
4666 | /** | |
4667 | * ice_prepare_for_shutdown - prep for PCI shutdown | |
4668 | * @pf: board private structure | |
4669 | * | |
4670 | * Inform or close all dependent features in prep for PCI device shutdown | |
4671 | */ | |
4672 | static void ice_prepare_for_shutdown(struct ice_pf *pf) | |
4673 | { | |
4674 | struct ice_hw *hw = &pf->hw; | |
4675 | u32 v; | |
4676 | ||
4677 | /* Notify VFs of impending reset */ | |
4678 | if (ice_check_sq_alive(hw, &hw->mailboxq)) | |
4679 | ice_vc_notify_reset(pf); | |
4680 | ||
4681 | dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); | |
4682 | ||
4683 | /* disable the VSIs and their queues that are not already DOWN */ | |
4684 | ice_pf_dis_all_vsi(pf, false); | |
4685 | ||
4686 | ice_for_each_vsi(pf, v) | |
4687 | if (pf->vsi[v]) | |
4688 | pf->vsi[v]->vsi_num = 0; | |
4689 | ||
4690 | ice_shutdown_all_ctrlq(hw); | |
4691 | } | |
4692 | ||
4693 | /** | |
4694 | * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme | |
4695 | * @pf: board private structure to reinitialize | |
4696 | * | |
4697 | * This routine reinitialize interrupt scheme that was cleared during | |
4698 | * power management suspend callback. | |
4699 | * | |
4700 | * This should be called during resume routine to re-allocate the q_vectors | |
4701 | * and reacquire interrupts. | |
4702 | */ | |
4703 | static int ice_reinit_interrupt_scheme(struct ice_pf *pf) | |
4704 | { | |
4705 | struct device *dev = ice_pf_to_dev(pf); | |
4706 | int ret, v; | |
4707 | ||
4708 | /* Since we clear MSIX flag during suspend, we need to | |
4709 | * set it back during resume... | |
4710 | */ | |
4711 | ||
4712 | ret = ice_init_interrupt_scheme(pf); | |
4713 | if (ret) { | |
4714 | dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); | |
4715 | return ret; | |
4716 | } | |
4717 | ||
4718 | /* Remap vectors and rings, after successful re-init interrupts */ | |
4719 | ice_for_each_vsi(pf, v) { | |
4720 | if (!pf->vsi[v]) | |
4721 | continue; | |
4722 | ||
4723 | ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); | |
4724 | if (ret) | |
4725 | goto err_reinit; | |
4726 | ice_vsi_map_rings_to_vectors(pf->vsi[v]); | |
4727 | } | |
4728 | ||
4729 | ret = ice_req_irq_msix_misc(pf); | |
4730 | if (ret) { | |
4731 | dev_err(dev, "Setting up misc vector failed after device suspend %d\n", | |
4732 | ret); | |
4733 | goto err_reinit; | |
4734 | } | |
4735 | ||
4736 | return 0; | |
4737 | ||
4738 | err_reinit: | |
4739 | while (v--) | |
4740 | if (pf->vsi[v]) | |
4741 | ice_vsi_free_q_vectors(pf->vsi[v]); | |
4742 | ||
4743 | return ret; | |
4744 | } | |
4745 | ||
4746 | /** | |
4747 | * ice_suspend | |
4748 | * @dev: generic device information structure | |
4749 | * | |
4750 | * Power Management callback to quiesce the device and prepare | |
4751 | * for D3 transition. | |
4752 | */ | |
65c72291 | 4753 | static int __maybe_unused ice_suspend(struct device *dev) |
769c500d AA |
4754 | { |
4755 | struct pci_dev *pdev = to_pci_dev(dev); | |
4756 | struct ice_pf *pf; | |
4757 | int disabled, v; | |
4758 | ||
4759 | pf = pci_get_drvdata(pdev); | |
4760 | ||
4761 | if (!ice_pf_state_is_nominal(pf)) { | |
4762 | dev_err(dev, "Device is not ready, no need to suspend it\n"); | |
4763 | return -EBUSY; | |
4764 | } | |
4765 | ||
4766 | /* Stop watchdog tasks until resume completion. | |
4767 | * Even though it is most likely that the service task is | |
4768 | * disabled if the device is suspended or down, the service task's | |
4769 | * state is controlled by a different state bit, and we should | |
4770 | * store and honor whatever state that bit is in at this point. | |
4771 | */ | |
4772 | disabled = ice_service_task_stop(pf); | |
4773 | ||
f9f5301e DE |
4774 | ice_unplug_aux_dev(pf); |
4775 | ||
769c500d | 4776 | /* Already suspended?, then there is nothing to do */ |
7e408e07 | 4777 | if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { |
769c500d AA |
4778 | if (!disabled) |
4779 | ice_service_task_restart(pf); | |
4780 | return 0; | |
4781 | } | |
4782 | ||
7e408e07 | 4783 | if (test_bit(ICE_DOWN, pf->state) || |
769c500d AA |
4784 | ice_is_reset_in_progress(pf->state)) { |
4785 | dev_err(dev, "can't suspend device in reset or already down\n"); | |
4786 | if (!disabled) | |
4787 | ice_service_task_restart(pf); | |
4788 | return 0; | |
4789 | } | |
4790 | ||
4791 | ice_setup_mc_magic_wake(pf); | |
4792 | ||
4793 | ice_prepare_for_shutdown(pf); | |
4794 | ||
4795 | ice_set_wake(pf); | |
4796 | ||
4797 | /* Free vectors, clear the interrupt scheme and release IRQs | |
4798 | * for proper hibernation, especially with large number of CPUs. | |
4799 | * Otherwise hibernation might fail when mapping all the vectors back | |
4800 | * to CPU0. | |
4801 | */ | |
4802 | ice_free_irq_msix_misc(pf); | |
4803 | ice_for_each_vsi(pf, v) { | |
4804 | if (!pf->vsi[v]) | |
4805 | continue; | |
4806 | ice_vsi_free_q_vectors(pf->vsi[v]); | |
4807 | } | |
1831da7e | 4808 | ice_free_cpu_rx_rmap(ice_get_main_vsi(pf)); |
769c500d AA |
4809 | ice_clear_interrupt_scheme(pf); |
4810 | ||
466e4392 | 4811 | pci_save_state(pdev); |
769c500d AA |
4812 | pci_wake_from_d3(pdev, pf->wol_ena); |
4813 | pci_set_power_state(pdev, PCI_D3hot); | |
4814 | return 0; | |
4815 | } | |
4816 | ||
4817 | /** | |
4818 | * ice_resume - PM callback for waking up from D3 | |
4819 | * @dev: generic device information structure | |
4820 | */ | |
65c72291 | 4821 | static int __maybe_unused ice_resume(struct device *dev) |
769c500d AA |
4822 | { |
4823 | struct pci_dev *pdev = to_pci_dev(dev); | |
4824 | enum ice_reset_req reset_type; | |
4825 | struct ice_pf *pf; | |
4826 | struct ice_hw *hw; | |
4827 | int ret; | |
4828 | ||
4829 | pci_set_power_state(pdev, PCI_D0); | |
4830 | pci_restore_state(pdev); | |
4831 | pci_save_state(pdev); | |
4832 | ||
4833 | if (!pci_device_is_present(pdev)) | |
4834 | return -ENODEV; | |
4835 | ||
4836 | ret = pci_enable_device_mem(pdev); | |
4837 | if (ret) { | |
4838 | dev_err(dev, "Cannot enable device after suspend\n"); | |
4839 | return ret; | |
4840 | } | |
4841 | ||
4842 | pf = pci_get_drvdata(pdev); | |
4843 | hw = &pf->hw; | |
4844 | ||
4845 | pf->wakeup_reason = rd32(hw, PFPM_WUS); | |
4846 | ice_print_wake_reason(pf); | |
4847 | ||
4848 | /* We cleared the interrupt scheme when we suspended, so we need to | |
4849 | * restore it now to resume device functionality. | |
4850 | */ | |
4851 | ret = ice_reinit_interrupt_scheme(pf); | |
4852 | if (ret) | |
4853 | dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); | |
4854 | ||
7e408e07 | 4855 | clear_bit(ICE_DOWN, pf->state); |
769c500d AA |
4856 | /* Now perform PF reset and rebuild */ |
4857 | reset_type = ICE_RESET_PFR; | |
4858 | /* re-enable service task for reset, but allow reset to schedule it */ | |
7e408e07 | 4859 | clear_bit(ICE_SERVICE_DIS, pf->state); |
769c500d AA |
4860 | |
4861 | if (ice_schedule_reset(pf, reset_type)) | |
4862 | dev_err(dev, "Reset during resume failed.\n"); | |
4863 | ||
7e408e07 | 4864 | clear_bit(ICE_SUSPENDED, pf->state); |
769c500d AA |
4865 | ice_service_task_restart(pf); |
4866 | ||
4867 | /* Restart the service task */ | |
4868 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | |
4869 | ||
4870 | return 0; | |
4871 | } | |
4872 | #endif /* CONFIG_PM */ | |
4873 | ||
5995b6d0 BC |
4874 | /** |
4875 | * ice_pci_err_detected - warning that PCI error has been detected | |
4876 | * @pdev: PCI device information struct | |
4877 | * @err: the type of PCI error | |
4878 | * | |
4879 | * Called to warn that something happened on the PCI bus and the error handling | |
4880 | * is in progress. Allows the driver to gracefully prepare/handle PCI errors. | |
4881 | */ | |
4882 | static pci_ers_result_t | |
16d79cd4 | 4883 | ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) |
5995b6d0 BC |
4884 | { |
4885 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
4886 | ||
4887 | if (!pf) { | |
4888 | dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", | |
4889 | __func__, err); | |
4890 | return PCI_ERS_RESULT_DISCONNECT; | |
4891 | } | |
4892 | ||
7e408e07 | 4893 | if (!test_bit(ICE_SUSPENDED, pf->state)) { |
5995b6d0 BC |
4894 | ice_service_task_stop(pf); |
4895 | ||
7e408e07 AV |
4896 | if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { |
4897 | set_bit(ICE_PFR_REQ, pf->state); | |
5995b6d0 BC |
4898 | ice_prepare_for_reset(pf); |
4899 | } | |
4900 | } | |
4901 | ||
4902 | return PCI_ERS_RESULT_NEED_RESET; | |
4903 | } | |
4904 | ||
4905 | /** | |
4906 | * ice_pci_err_slot_reset - a PCI slot reset has just happened | |
4907 | * @pdev: PCI device information struct | |
4908 | * | |
4909 | * Called to determine if the driver can recover from the PCI slot reset by | |
4910 | * using a register read to determine if the device is recoverable. | |
4911 | */ | |
4912 | static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev) | |
4913 | { | |
4914 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
4915 | pci_ers_result_t result; | |
4916 | int err; | |
4917 | u32 reg; | |
4918 | ||
4919 | err = pci_enable_device_mem(pdev); | |
4920 | if (err) { | |
19cce2c6 | 4921 | dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", |
5995b6d0 BC |
4922 | err); |
4923 | result = PCI_ERS_RESULT_DISCONNECT; | |
4924 | } else { | |
4925 | pci_set_master(pdev); | |
4926 | pci_restore_state(pdev); | |
4927 | pci_save_state(pdev); | |
4928 | pci_wake_from_d3(pdev, false); | |
4929 | ||
4930 | /* Check for life */ | |
4931 | reg = rd32(&pf->hw, GLGEN_RTRIG); | |
4932 | if (!reg) | |
4933 | result = PCI_ERS_RESULT_RECOVERED; | |
4934 | else | |
4935 | result = PCI_ERS_RESULT_DISCONNECT; | |
4936 | } | |
4937 | ||
894020fd | 4938 | err = pci_aer_clear_nonfatal_status(pdev); |
5995b6d0 | 4939 | if (err) |
86f26a77 | 4940 | dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n", |
5995b6d0 BC |
4941 | err); |
4942 | /* non-fatal, continue */ | |
4943 | ||
4944 | return result; | |
4945 | } | |
4946 | ||
4947 | /** | |
4948 | * ice_pci_err_resume - restart operations after PCI error recovery | |
4949 | * @pdev: PCI device information struct | |
4950 | * | |
4951 | * Called to allow the driver to bring things back up after PCI error and/or | |
4952 | * reset recovery have finished | |
4953 | */ | |
4954 | static void ice_pci_err_resume(struct pci_dev *pdev) | |
4955 | { | |
4956 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
4957 | ||
4958 | if (!pf) { | |
19cce2c6 AV |
4959 | dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", |
4960 | __func__); | |
5995b6d0 BC |
4961 | return; |
4962 | } | |
4963 | ||
7e408e07 | 4964 | if (test_bit(ICE_SUSPENDED, pf->state)) { |
5995b6d0 BC |
4965 | dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", |
4966 | __func__); | |
4967 | return; | |
4968 | } | |
4969 | ||
a54a0b24 NN |
4970 | ice_restore_all_vfs_msi_state(pdev); |
4971 | ||
5995b6d0 BC |
4972 | ice_do_reset(pf, ICE_RESET_PFR); |
4973 | ice_service_task_restart(pf); | |
4974 | mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); | |
4975 | } | |
4976 | ||
4977 | /** | |
4978 | * ice_pci_err_reset_prepare - prepare device driver for PCI reset | |
4979 | * @pdev: PCI device information struct | |
4980 | */ | |
4981 | static void ice_pci_err_reset_prepare(struct pci_dev *pdev) | |
4982 | { | |
4983 | struct ice_pf *pf = pci_get_drvdata(pdev); | |
4984 | ||
7e408e07 | 4985 | if (!test_bit(ICE_SUSPENDED, pf->state)) { |
5995b6d0 BC |
4986 | ice_service_task_stop(pf); |
4987 | ||
7e408e07 AV |
4988 | if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { |
4989 | set_bit(ICE_PFR_REQ, pf->state); | |
5995b6d0 BC |
4990 | ice_prepare_for_reset(pf); |
4991 | } | |
4992 | } | |
4993 | } | |
4994 | ||
4995 | /** | |
4996 | * ice_pci_err_reset_done - PCI reset done, device driver reset can begin | |
4997 | * @pdev: PCI device information struct | |
4998 | */ | |
4999 | static void ice_pci_err_reset_done(struct pci_dev *pdev) | |
5000 | { | |
5001 | ice_pci_err_resume(pdev); | |
5002 | } | |
5003 | ||
837f08fd AV |
5004 | /* ice_pci_tbl - PCI Device ID Table |
5005 | * | |
5006 | * Wildcard entries (PCI_ANY_ID) should come last | |
5007 | * Last entry must be all 0s | |
5008 | * | |
5009 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, | |
5010 | * Class, Class Mask, private data (not used) } | |
5011 | */ | |
5012 | static const struct pci_device_id ice_pci_tbl[] = { | |
633d7449 AV |
5013 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 }, |
5014 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 }, | |
5015 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 }, | |
195fb977 | 5016 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 }, |
e36aeec0 BA |
5017 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 }, |
5018 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 }, | |
5019 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 }, | |
5020 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 }, | |
5021 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 }, | |
5d9e618c JK |
5022 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 }, |
5023 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 }, | |
5024 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 }, | |
5025 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 }, | |
5026 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 }, | |
2fbfa966 | 5027 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 }, |
5d9e618c JK |
5028 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 }, |
5029 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 }, | |
5030 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 }, | |
e36aeec0 BA |
5031 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 }, |
5032 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 }, | |
5033 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 }, | |
5034 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 }, | |
5035 | { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, | |
837f08fd AV |
5036 | /* required last entry */ |
5037 | { 0, } | |
5038 | }; | |
5039 | MODULE_DEVICE_TABLE(pci, ice_pci_tbl); | |
5040 | ||
769c500d AA |
5041 | static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); |
5042 | ||
5995b6d0 BC |
5043 | static const struct pci_error_handlers ice_pci_err_handler = { |
5044 | .error_detected = ice_pci_err_detected, | |
5045 | .slot_reset = ice_pci_err_slot_reset, | |
5046 | .reset_prepare = ice_pci_err_reset_prepare, | |
5047 | .reset_done = ice_pci_err_reset_done, | |
5048 | .resume = ice_pci_err_resume | |
5049 | }; | |
5050 | ||
837f08fd AV |
5051 | static struct pci_driver ice_driver = { |
5052 | .name = KBUILD_MODNAME, | |
5053 | .id_table = ice_pci_tbl, | |
5054 | .probe = ice_probe, | |
5055 | .remove = ice_remove, | |
769c500d AA |
5056 | #ifdef CONFIG_PM |
5057 | .driver.pm = &ice_pm_ops, | |
5058 | #endif /* CONFIG_PM */ | |
5059 | .shutdown = ice_shutdown, | |
ddf30f7f | 5060 | .sriov_configure = ice_sriov_configure, |
5995b6d0 | 5061 | .err_handler = &ice_pci_err_handler |
837f08fd AV |
5062 | }; |
5063 | ||
5064 | /** | |
5065 | * ice_module_init - Driver registration routine | |
5066 | * | |
5067 | * ice_module_init is the first routine called when the driver is | |
5068 | * loaded. All it does is register with the PCI subsystem. | |
5069 | */ | |
5070 | static int __init ice_module_init(void) | |
5071 | { | |
5072 | int status; | |
5073 | ||
34a2a3b8 | 5074 | pr_info("%s\n", ice_driver_string); |
837f08fd AV |
5075 | pr_info("%s\n", ice_copyright); |
5076 | ||
0f9d5027 | 5077 | ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); |
940b61af AV |
5078 | if (!ice_wq) { |
5079 | pr_err("Failed to create workqueue\n"); | |
5080 | return -ENOMEM; | |
5081 | } | |
5082 | ||
837f08fd | 5083 | status = pci_register_driver(&ice_driver); |
940b61af | 5084 | if (status) { |
2f2da36e | 5085 | pr_err("failed to register PCI driver, err %d\n", status); |
940b61af AV |
5086 | destroy_workqueue(ice_wq); |
5087 | } | |
837f08fd AV |
5088 | |
5089 | return status; | |
5090 | } | |
5091 | module_init(ice_module_init); | |
5092 | ||
5093 | /** | |
5094 | * ice_module_exit - Driver exit cleanup routine | |
5095 | * | |
5096 | * ice_module_exit is called just before the driver is removed | |
5097 | * from memory. | |
5098 | */ | |
5099 | static void __exit ice_module_exit(void) | |
5100 | { | |
5101 | pci_unregister_driver(&ice_driver); | |
940b61af | 5102 | destroy_workqueue(ice_wq); |
837f08fd AV |
5103 | pr_info("module unloaded\n"); |
5104 | } | |
5105 | module_exit(ice_module_exit); | |
3a858ba3 | 5106 | |
e94d4478 | 5107 | /** |
f9867df6 | 5108 | * ice_set_mac_address - NDO callback to set MAC address |
e94d4478 AV |
5109 | * @netdev: network interface device structure |
5110 | * @pi: pointer to an address structure | |
5111 | * | |
5112 | * Returns 0 on success, negative on failure | |
5113 | */ | |
5114 | static int ice_set_mac_address(struct net_device *netdev, void *pi) | |
5115 | { | |
5116 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5117 | struct ice_vsi *vsi = np->vsi; | |
5118 | struct ice_pf *pf = vsi->back; | |
5119 | struct ice_hw *hw = &pf->hw; | |
5120 | struct sockaddr *addr = pi; | |
5121 | enum ice_status status; | |
b357d971 | 5122 | u8 old_mac[ETH_ALEN]; |
e94d4478 | 5123 | u8 flags = 0; |
bbb968e8 | 5124 | int err = 0; |
e94d4478 AV |
5125 | u8 *mac; |
5126 | ||
5127 | mac = (u8 *)addr->sa_data; | |
5128 | ||
5129 | if (!is_valid_ether_addr(mac)) | |
5130 | return -EADDRNOTAVAIL; | |
5131 | ||
5132 | if (ether_addr_equal(netdev->dev_addr, mac)) { | |
3ba7f53f | 5133 | netdev_dbg(netdev, "already using mac %pM\n", mac); |
e94d4478 AV |
5134 | return 0; |
5135 | } | |
5136 | ||
7e408e07 | 5137 | if (test_bit(ICE_DOWN, pf->state) || |
5df7e45d | 5138 | ice_is_reset_in_progress(pf->state)) { |
e94d4478 AV |
5139 | netdev_err(netdev, "can't set mac %pM. device not ready\n", |
5140 | mac); | |
5141 | return -EBUSY; | |
5142 | } | |
5143 | ||
3ba7f53f | 5144 | netif_addr_lock_bh(netdev); |
b357d971 BC |
5145 | ether_addr_copy(old_mac, netdev->dev_addr); |
5146 | /* change the netdev's MAC address */ | |
a05e4c0a | 5147 | eth_hw_addr_set(netdev, mac); |
b357d971 BC |
5148 | netif_addr_unlock_bh(netdev); |
5149 | ||
757976ab | 5150 | /* Clean up old MAC filter. Not an error if old filter doesn't exist */ |
b357d971 | 5151 | status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); |
757976ab | 5152 | if (status && status != ICE_ERR_DOES_NOT_EXIST) { |
e94d4478 | 5153 | err = -EADDRNOTAVAIL; |
bbb968e8 | 5154 | goto err_update_filters; |
e94d4478 AV |
5155 | } |
5156 | ||
13ed5e8a | 5157 | /* Add filter for new MAC. If filter exists, return success */ |
1b8f15b6 | 5158 | status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); |
3ba7f53f | 5159 | if (status == ICE_ERR_ALREADY_EXISTS) |
13ed5e8a NN |
5160 | /* Although this MAC filter is already present in hardware it's |
5161 | * possible in some cases (e.g. bonding) that dev_addr was | |
5162 | * modified outside of the driver and needs to be restored back | |
5163 | * to this value. | |
5164 | */ | |
757976ab | 5165 | netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); |
3ba7f53f BC |
5166 | else if (status) |
5167 | /* error if the new filter addition failed */ | |
757976ab LY |
5168 | err = -EADDRNOTAVAIL; |
5169 | ||
bbb968e8 | 5170 | err_update_filters: |
e94d4478 | 5171 | if (err) { |
2f2da36e | 5172 | netdev_err(netdev, "can't set MAC %pM. filter update failed\n", |
e94d4478 | 5173 | mac); |
b357d971 | 5174 | netif_addr_lock_bh(netdev); |
f3956ebb | 5175 | eth_hw_addr_set(netdev, old_mac); |
3ba7f53f | 5176 | netif_addr_unlock_bh(netdev); |
e94d4478 AV |
5177 | return err; |
5178 | } | |
5179 | ||
2f2da36e | 5180 | netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", |
e94d4478 AV |
5181 | netdev->dev_addr); |
5182 | ||
f9867df6 | 5183 | /* write new MAC address to the firmware */ |
e94d4478 AV |
5184 | flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL; |
5185 | status = ice_aq_manage_mac_write(hw, mac, flags, NULL); | |
5186 | if (status) { | |
0fee3577 LY |
5187 | netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n", |
5188 | mac, ice_stat_str(status)); | |
e94d4478 AV |
5189 | } |
5190 | return 0; | |
5191 | } | |
5192 | ||
5193 | /** | |
5194 | * ice_set_rx_mode - NDO callback to set the netdev filters | |
5195 | * @netdev: network interface device structure | |
5196 | */ | |
5197 | static void ice_set_rx_mode(struct net_device *netdev) | |
5198 | { | |
5199 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5200 | struct ice_vsi *vsi = np->vsi; | |
5201 | ||
5202 | if (!vsi) | |
5203 | return; | |
5204 | ||
5205 | /* Set the flags to synchronize filters | |
5206 | * ndo_set_rx_mode may be triggered even without a change in netdev | |
5207 | * flags | |
5208 | */ | |
e97fb1ae AV |
5209 | set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); |
5210 | set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); | |
e94d4478 AV |
5211 | set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); |
5212 | ||
5213 | /* schedule our worker thread which will take care of | |
5214 | * applying the new filter changes | |
5215 | */ | |
5216 | ice_service_task_schedule(vsi->back); | |
5217 | } | |
5218 | ||
1ddef455 UK |
5219 | /** |
5220 | * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate | |
5221 | * @netdev: network interface device structure | |
5222 | * @queue_index: Queue ID | |
5223 | * @maxrate: maximum bandwidth in Mbps | |
5224 | */ | |
5225 | static int | |
5226 | ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) | |
5227 | { | |
5228 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5229 | struct ice_vsi *vsi = np->vsi; | |
5230 | enum ice_status status; | |
5231 | u16 q_handle; | |
5232 | u8 tc; | |
5233 | ||
5234 | /* Validate maxrate requested is within permitted range */ | |
5235 | if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) { | |
19cce2c6 | 5236 | netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n", |
1ddef455 UK |
5237 | maxrate, queue_index); |
5238 | return -EINVAL; | |
5239 | } | |
5240 | ||
5241 | q_handle = vsi->tx_rings[queue_index]->q_handle; | |
5242 | tc = ice_dcb_get_tc(vsi, queue_index); | |
5243 | ||
5244 | /* Set BW back to default, when user set maxrate to 0 */ | |
5245 | if (!maxrate) | |
5246 | status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, | |
5247 | q_handle, ICE_MAX_BW); | |
5248 | else | |
5249 | status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, | |
5250 | q_handle, ICE_MAX_BW, maxrate * 1000); | |
5251 | if (status) { | |
0fee3577 LY |
5252 | netdev_err(netdev, "Unable to set Tx max rate, error %s\n", |
5253 | ice_stat_str(status)); | |
1ddef455 UK |
5254 | return -EIO; |
5255 | } | |
5256 | ||
5257 | return 0; | |
5258 | } | |
5259 | ||
e94d4478 AV |
5260 | /** |
5261 | * ice_fdb_add - add an entry to the hardware database | |
5262 | * @ndm: the input from the stack | |
5263 | * @tb: pointer to array of nladdr (unused) | |
5264 | * @dev: the net device pointer | |
5265 | * @addr: the MAC address entry being added | |
f9867df6 | 5266 | * @vid: VLAN ID |
e94d4478 | 5267 | * @flags: instructions from stack about fdb operation |
99be37ed | 5268 | * @extack: netlink extended ack |
e94d4478 | 5269 | */ |
99be37ed BA |
5270 | static int |
5271 | ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], | |
5272 | struct net_device *dev, const unsigned char *addr, u16 vid, | |
5273 | u16 flags, struct netlink_ext_ack __always_unused *extack) | |
e94d4478 AV |
5274 | { |
5275 | int err; | |
5276 | ||
5277 | if (vid) { | |
5278 | netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n"); | |
5279 | return -EINVAL; | |
5280 | } | |
5281 | if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { | |
5282 | netdev_err(dev, "FDB only supports static addresses\n"); | |
5283 | return -EINVAL; | |
5284 | } | |
5285 | ||
5286 | if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) | |
5287 | err = dev_uc_add_excl(dev, addr); | |
5288 | else if (is_multicast_ether_addr(addr)) | |
5289 | err = dev_mc_add_excl(dev, addr); | |
5290 | else | |
5291 | err = -EINVAL; | |
5292 | ||
5293 | /* Only return duplicate errors if NLM_F_EXCL is set */ | |
5294 | if (err == -EEXIST && !(flags & NLM_F_EXCL)) | |
5295 | err = 0; | |
5296 | ||
5297 | return err; | |
5298 | } | |
5299 | ||
5300 | /** | |
5301 | * ice_fdb_del - delete an entry from the hardware database | |
5302 | * @ndm: the input from the stack | |
5303 | * @tb: pointer to array of nladdr (unused) | |
5304 | * @dev: the net device pointer | |
5305 | * @addr: the MAC address entry being added | |
f9867df6 | 5306 | * @vid: VLAN ID |
e94d4478 | 5307 | */ |
c8b7abdd BA |
5308 | static int |
5309 | ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], | |
5310 | struct net_device *dev, const unsigned char *addr, | |
5311 | __always_unused u16 vid) | |
e94d4478 AV |
5312 | { |
5313 | int err; | |
5314 | ||
5315 | if (ndm->ndm_state & NUD_PERMANENT) { | |
5316 | netdev_err(dev, "FDB only supports static addresses\n"); | |
5317 | return -EINVAL; | |
5318 | } | |
5319 | ||
5320 | if (is_unicast_ether_addr(addr)) | |
5321 | err = dev_uc_del(dev, addr); | |
5322 | else if (is_multicast_ether_addr(addr)) | |
5323 | err = dev_mc_del(dev, addr); | |
5324 | else | |
5325 | err = -EINVAL; | |
5326 | ||
5327 | return err; | |
5328 | } | |
5329 | ||
d76a60ba AV |
5330 | /** |
5331 | * ice_set_features - set the netdev feature flags | |
5332 | * @netdev: ptr to the netdev being adjusted | |
5333 | * @features: the feature set that the stack is suggesting | |
5334 | */ | |
c8b7abdd BA |
5335 | static int |
5336 | ice_set_features(struct net_device *netdev, netdev_features_t features) | |
d76a60ba AV |
5337 | { |
5338 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5339 | struct ice_vsi *vsi = np->vsi; | |
5f8cc355 | 5340 | struct ice_pf *pf = vsi->back; |
d76a60ba AV |
5341 | int ret = 0; |
5342 | ||
462acf6a TN |
5343 | /* Don't set any netdev advanced features with device in Safe Mode */ |
5344 | if (ice_is_safe_mode(vsi->back)) { | |
19cce2c6 | 5345 | dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n"); |
462acf6a TN |
5346 | return ret; |
5347 | } | |
5348 | ||
5f8cc355 HT |
5349 | /* Do not change setting during reset */ |
5350 | if (ice_is_reset_in_progress(pf->state)) { | |
19cce2c6 | 5351 | dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n"); |
5f8cc355 HT |
5352 | return -EBUSY; |
5353 | } | |
5354 | ||
8f529ff9 TN |
5355 | /* Multiple features can be changed in one call so keep features in |
5356 | * separate if/else statements to guarantee each feature is checked | |
5357 | */ | |
492af0ab | 5358 | if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) |
4fe36226 | 5359 | ice_vsi_manage_rss_lut(vsi, true); |
492af0ab MFIP |
5360 | else if (!(features & NETIF_F_RXHASH) && |
5361 | netdev->features & NETIF_F_RXHASH) | |
4fe36226 | 5362 | ice_vsi_manage_rss_lut(vsi, false); |
492af0ab | 5363 | |
d76a60ba AV |
5364 | if ((features & NETIF_F_HW_VLAN_CTAG_RX) && |
5365 | !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) | |
5366 | ret = ice_vsi_manage_vlan_stripping(vsi, true); | |
5367 | else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && | |
5368 | (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) | |
5369 | ret = ice_vsi_manage_vlan_stripping(vsi, false); | |
8f529ff9 TN |
5370 | |
5371 | if ((features & NETIF_F_HW_VLAN_CTAG_TX) && | |
5372 | !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) | |
d76a60ba AV |
5373 | ret = ice_vsi_manage_vlan_insertion(vsi); |
5374 | else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) && | |
5375 | (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) | |
5376 | ret = ice_vsi_manage_vlan_insertion(vsi); | |
5377 | ||
3171948e TN |
5378 | if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && |
5379 | !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) | |
5380 | ret = ice_cfg_vlan_pruning(vsi, true, false); | |
5381 | else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && | |
5382 | (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) | |
5383 | ret = ice_cfg_vlan_pruning(vsi, false, false); | |
5384 | ||
148beb61 | 5385 | if ((features & NETIF_F_NTUPLE) && |
28bf2672 | 5386 | !(netdev->features & NETIF_F_NTUPLE)) { |
148beb61 | 5387 | ice_vsi_manage_fdir(vsi, true); |
28bf2672 BC |
5388 | ice_init_arfs(vsi); |
5389 | } else if (!(features & NETIF_F_NTUPLE) && | |
5390 | (netdev->features & NETIF_F_NTUPLE)) { | |
148beb61 | 5391 | ice_vsi_manage_fdir(vsi, false); |
28bf2672 BC |
5392 | ice_clear_arfs(vsi); |
5393 | } | |
148beb61 | 5394 | |
d76a60ba AV |
5395 | return ret; |
5396 | } | |
5397 | ||
5398 | /** | |
f9867df6 AV |
5399 | * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI |
5400 | * @vsi: VSI to setup VLAN properties for | |
d76a60ba AV |
5401 | */ |
5402 | static int ice_vsi_vlan_setup(struct ice_vsi *vsi) | |
5403 | { | |
5404 | int ret = 0; | |
5405 | ||
5406 | if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) | |
5407 | ret = ice_vsi_manage_vlan_stripping(vsi, true); | |
5408 | if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) | |
5409 | ret = ice_vsi_manage_vlan_insertion(vsi); | |
5410 | ||
5411 | return ret; | |
5412 | } | |
5413 | ||
cdedef59 AV |
5414 | /** |
5415 | * ice_vsi_cfg - Setup the VSI | |
5416 | * @vsi: the VSI being configured | |
5417 | * | |
5418 | * Return 0 on success and negative value on error | |
5419 | */ | |
0e674aeb | 5420 | int ice_vsi_cfg(struct ice_vsi *vsi) |
cdedef59 AV |
5421 | { |
5422 | int err; | |
5423 | ||
c7f2c42b AV |
5424 | if (vsi->netdev) { |
5425 | ice_set_rx_mode(vsi->netdev); | |
9ecd25c2 AV |
5426 | |
5427 | err = ice_vsi_vlan_setup(vsi); | |
5428 | ||
c7f2c42b AV |
5429 | if (err) |
5430 | return err; | |
5431 | } | |
a629cf0a | 5432 | ice_vsi_cfg_dcb_rings(vsi); |
03f7a986 AV |
5433 | |
5434 | err = ice_vsi_cfg_lan_txqs(vsi); | |
efc2214b MF |
5435 | if (!err && ice_is_xdp_ena_vsi(vsi)) |
5436 | err = ice_vsi_cfg_xdp_txqs(vsi); | |
cdedef59 AV |
5437 | if (!err) |
5438 | err = ice_vsi_cfg_rxqs(vsi); | |
5439 | ||
5440 | return err; | |
5441 | } | |
5442 | ||
cdf1f1f1 JK |
5443 | /* THEORY OF MODERATION: |
5444 | * The below code creates custom DIM profiles for use by this driver, because | |
5445 | * the ice driver hardware works differently than the hardware that DIMLIB was | |
5446 | * originally made for. ice hardware doesn't have packet count limits that | |
5447 | * can trigger an interrupt, but it *does* have interrupt rate limit support, | |
5448 | * and this code adds that capability to be used by the driver when it's using | |
5449 | * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver | |
5450 | * for how to "respond" to traffic and interrupts, so this driver uses a | |
5451 | * slightly different set of moderation parameters to get best performance. | |
5452 | */ | |
5453 | struct ice_dim { | |
5454 | /* the throttle rate for interrupts, basically worst case delay before | |
5455 | * an initial interrupt fires, value is stored in microseconds. | |
5456 | */ | |
5457 | u16 itr; | |
5458 | /* the rate limit for interrupts, which can cap a delay from a small | |
5459 | * ITR at a certain amount of interrupts per second. f.e. a 2us ITR | |
5460 | * could yield as much as 500,000 interrupts per second, but with a | |
5461 | * 10us rate limit, it limits to 100,000 interrupts per second. Value | |
5462 | * is stored in microseconds. | |
5463 | */ | |
5464 | u16 intrl; | |
5465 | }; | |
5466 | ||
5467 | /* Make a different profile for Rx that doesn't allow quite so aggressive | |
5468 | * moderation at the high end (it maxes out at 128us or about 8k interrupts a | |
5469 | * second. The INTRL/rate parameters here are only useful to cap small ITR | |
5470 | * values, which is why for larger ITR's - like 128, which can only generate | |
5471 | * 8k interrupts per second, there is no point to rate limit and the values | |
5472 | * are set to zero. The rate limit values do affect latency, and so must | |
5473 | * be reasonably small so to not impact latency sensitive tests. | |
5474 | */ | |
5475 | static const struct ice_dim rx_profile[] = { | |
5476 | {2, 10}, | |
5477 | {8, 16}, | |
5478 | {32, 0}, | |
5479 | {96, 0}, | |
5480 | {128, 0} | |
5481 | }; | |
5482 | ||
5483 | /* The transmit profile, which has the same sorts of values | |
5484 | * as the previous struct | |
5485 | */ | |
5486 | static const struct ice_dim tx_profile[] = { | |
5487 | {2, 10}, | |
5488 | {8, 16}, | |
5489 | {64, 0}, | |
5490 | {128, 0}, | |
5491 | {256, 0} | |
5492 | }; | |
5493 | ||
5494 | static void ice_tx_dim_work(struct work_struct *work) | |
5495 | { | |
5496 | struct ice_ring_container *rc; | |
5497 | struct ice_q_vector *q_vector; | |
5498 | struct dim *dim; | |
5499 | u16 itr, intrl; | |
5500 | ||
5501 | dim = container_of(work, struct dim, work); | |
5502 | rc = container_of(dim, struct ice_ring_container, dim); | |
5503 | q_vector = container_of(rc, struct ice_q_vector, tx); | |
5504 | ||
5505 | if (dim->profile_ix >= ARRAY_SIZE(tx_profile)) | |
5506 | dim->profile_ix = ARRAY_SIZE(tx_profile) - 1; | |
5507 | ||
5508 | /* look up the values in our local table */ | |
5509 | itr = tx_profile[dim->profile_ix].itr; | |
5510 | intrl = tx_profile[dim->profile_ix].intrl; | |
5511 | ||
3089cf6d | 5512 | ice_trace(tx_dim_work, q_vector, dim); |
cdf1f1f1 JK |
5513 | ice_write_itr(rc, itr); |
5514 | ice_write_intrl(q_vector, intrl); | |
5515 | ||
5516 | dim->state = DIM_START_MEASURE; | |
5517 | } | |
5518 | ||
5519 | static void ice_rx_dim_work(struct work_struct *work) | |
5520 | { | |
5521 | struct ice_ring_container *rc; | |
5522 | struct ice_q_vector *q_vector; | |
5523 | struct dim *dim; | |
5524 | u16 itr, intrl; | |
5525 | ||
5526 | dim = container_of(work, struct dim, work); | |
5527 | rc = container_of(dim, struct ice_ring_container, dim); | |
5528 | q_vector = container_of(rc, struct ice_q_vector, rx); | |
5529 | ||
5530 | if (dim->profile_ix >= ARRAY_SIZE(rx_profile)) | |
5531 | dim->profile_ix = ARRAY_SIZE(rx_profile) - 1; | |
5532 | ||
5533 | /* look up the values in our local table */ | |
5534 | itr = rx_profile[dim->profile_ix].itr; | |
5535 | intrl = rx_profile[dim->profile_ix].intrl; | |
5536 | ||
3089cf6d | 5537 | ice_trace(rx_dim_work, q_vector, dim); |
cdf1f1f1 JK |
5538 | ice_write_itr(rc, itr); |
5539 | ice_write_intrl(q_vector, intrl); | |
5540 | ||
5541 | dim->state = DIM_START_MEASURE; | |
5542 | } | |
5543 | ||
2b245cb2 AV |
5544 | /** |
5545 | * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI | |
5546 | * @vsi: the VSI being configured | |
5547 | */ | |
5548 | static void ice_napi_enable_all(struct ice_vsi *vsi) | |
5549 | { | |
5550 | int q_idx; | |
5551 | ||
5552 | if (!vsi->netdev) | |
5553 | return; | |
5554 | ||
b4603dbf | 5555 | ice_for_each_q_vector(vsi, q_idx) { |
eec90376 YX |
5556 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
5557 | ||
cdf1f1f1 JK |
5558 | INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work); |
5559 | q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; | |
5560 | ||
5561 | INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work); | |
5562 | q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; | |
5563 | ||
eec90376 YX |
5564 | if (q_vector->rx.ring || q_vector->tx.ring) |
5565 | napi_enable(&q_vector->napi); | |
5566 | } | |
2b245cb2 AV |
5567 | } |
5568 | ||
cdedef59 AV |
5569 | /** |
5570 | * ice_up_complete - Finish the last steps of bringing up a connection | |
5571 | * @vsi: The VSI being configured | |
5572 | * | |
5573 | * Return 0 on success and negative value on error | |
5574 | */ | |
5575 | static int ice_up_complete(struct ice_vsi *vsi) | |
5576 | { | |
5577 | struct ice_pf *pf = vsi->back; | |
5578 | int err; | |
5579 | ||
ba880734 | 5580 | ice_vsi_cfg_msix(vsi); |
cdedef59 AV |
5581 | |
5582 | /* Enable only Rx rings, Tx rings were enabled by the FW when the | |
5583 | * Tx queue group list was configured and the context bits were | |
5584 | * programmed using ice_vsi_cfg_txqs | |
5585 | */ | |
13a6233b | 5586 | err = ice_vsi_start_all_rx_rings(vsi); |
cdedef59 AV |
5587 | if (err) |
5588 | return err; | |
5589 | ||
e97fb1ae | 5590 | clear_bit(ICE_VSI_DOWN, vsi->state); |
2b245cb2 | 5591 | ice_napi_enable_all(vsi); |
cdedef59 AV |
5592 | ice_vsi_ena_irq(vsi); |
5593 | ||
5594 | if (vsi->port_info && | |
5595 | (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && | |
5596 | vsi->netdev) { | |
5597 | ice_print_link_msg(vsi, true); | |
5598 | netif_tx_start_all_queues(vsi->netdev); | |
5599 | netif_carrier_on(vsi->netdev); | |
5600 | } | |
5601 | ||
5602 | ice_service_task_schedule(pf); | |
5603 | ||
1b5c19c7 | 5604 | return 0; |
cdedef59 AV |
5605 | } |
5606 | ||
fcea6f3d AV |
5607 | /** |
5608 | * ice_up - Bring the connection back up after being down | |
5609 | * @vsi: VSI being configured | |
5610 | */ | |
5611 | int ice_up(struct ice_vsi *vsi) | |
5612 | { | |
5613 | int err; | |
5614 | ||
5615 | err = ice_vsi_cfg(vsi); | |
5616 | if (!err) | |
5617 | err = ice_up_complete(vsi); | |
5618 | ||
5619 | return err; | |
5620 | } | |
5621 | ||
5622 | /** | |
5623 | * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring | |
5624 | * @ring: Tx or Rx ring to read stats from | |
5625 | * @pkts: packets stats counter | |
5626 | * @bytes: bytes stats counter | |
5627 | * | |
5628 | * This function fetches stats from the ring considering the atomic operations | |
5629 | * that needs to be performed to read u64 values in 32 bit machine. | |
5630 | */ | |
c8b7abdd BA |
5631 | static void |
5632 | ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes) | |
fcea6f3d AV |
5633 | { |
5634 | unsigned int start; | |
5635 | *pkts = 0; | |
5636 | *bytes = 0; | |
5637 | ||
5638 | if (!ring) | |
5639 | return; | |
5640 | do { | |
5641 | start = u64_stats_fetch_begin_irq(&ring->syncp); | |
5642 | *pkts = ring->stats.pkts; | |
5643 | *bytes = ring->stats.bytes; | |
5644 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); | |
5645 | } | |
5646 | ||
49d358e0 MP |
5647 | /** |
5648 | * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters | |
5649 | * @vsi: the VSI to be updated | |
5650 | * @rings: rings to work on | |
5651 | * @count: number of rings | |
5652 | */ | |
5653 | static void | |
5654 | ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings, | |
5655 | u16 count) | |
5656 | { | |
5657 | struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; | |
5658 | u16 i; | |
5659 | ||
5660 | for (i = 0; i < count; i++) { | |
5661 | struct ice_ring *ring; | |
5662 | u64 pkts, bytes; | |
5663 | ||
5664 | ring = READ_ONCE(rings[i]); | |
5665 | ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); | |
5666 | vsi_stats->tx_packets += pkts; | |
5667 | vsi_stats->tx_bytes += bytes; | |
5668 | vsi->tx_restart += ring->tx_stats.restart_q; | |
5669 | vsi->tx_busy += ring->tx_stats.tx_busy; | |
5670 | vsi->tx_linearize += ring->tx_stats.tx_linearize; | |
5671 | } | |
5672 | } | |
5673 | ||
fcea6f3d AV |
5674 | /** |
5675 | * ice_update_vsi_ring_stats - Update VSI stats counters | |
5676 | * @vsi: the VSI to be updated | |
5677 | */ | |
5678 | static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) | |
5679 | { | |
5680 | struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats; | |
fcea6f3d AV |
5681 | u64 pkts, bytes; |
5682 | int i; | |
5683 | ||
5684 | /* reset netdev stats */ | |
5685 | vsi_stats->tx_packets = 0; | |
5686 | vsi_stats->tx_bytes = 0; | |
5687 | vsi_stats->rx_packets = 0; | |
5688 | vsi_stats->rx_bytes = 0; | |
5689 | ||
5690 | /* reset non-netdev (extended) stats */ | |
5691 | vsi->tx_restart = 0; | |
5692 | vsi->tx_busy = 0; | |
5693 | vsi->tx_linearize = 0; | |
5694 | vsi->rx_buf_failed = 0; | |
5695 | vsi->rx_page_failed = 0; | |
5696 | ||
5697 | rcu_read_lock(); | |
5698 | ||
5699 | /* update Tx rings counters */ | |
49d358e0 | 5700 | ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq); |
fcea6f3d AV |
5701 | |
5702 | /* update Rx rings counters */ | |
5703 | ice_for_each_rxq(vsi, i) { | |
b6b0501d PSJ |
5704 | struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]); |
5705 | ||
fcea6f3d AV |
5706 | ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes); |
5707 | vsi_stats->rx_packets += pkts; | |
5708 | vsi_stats->rx_bytes += bytes; | |
5709 | vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; | |
5710 | vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; | |
5711 | } | |
5712 | ||
49d358e0 MP |
5713 | /* update XDP Tx rings counters */ |
5714 | if (ice_is_xdp_ena_vsi(vsi)) | |
5715 | ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings, | |
5716 | vsi->num_xdp_txq); | |
5717 | ||
fcea6f3d AV |
5718 | rcu_read_unlock(); |
5719 | } | |
5720 | ||
5721 | /** | |
5722 | * ice_update_vsi_stats - Update VSI stats counters | |
5723 | * @vsi: the VSI to be updated | |
5724 | */ | |
5a4a8673 | 5725 | void ice_update_vsi_stats(struct ice_vsi *vsi) |
fcea6f3d AV |
5726 | { |
5727 | struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; | |
5728 | struct ice_eth_stats *cur_es = &vsi->eth_stats; | |
5729 | struct ice_pf *pf = vsi->back; | |
5730 | ||
e97fb1ae | 5731 | if (test_bit(ICE_VSI_DOWN, vsi->state) || |
7e408e07 | 5732 | test_bit(ICE_CFG_BUSY, pf->state)) |
fcea6f3d AV |
5733 | return; |
5734 | ||
5735 | /* get stats as recorded by Tx/Rx rings */ | |
5736 | ice_update_vsi_ring_stats(vsi); | |
5737 | ||
5738 | /* get VSI stats as recorded by the hardware */ | |
5739 | ice_update_eth_stats(vsi); | |
5740 | ||
5741 | cur_ns->tx_errors = cur_es->tx_errors; | |
51fe27e1 | 5742 | cur_ns->rx_dropped = cur_es->rx_discards; |
fcea6f3d AV |
5743 | cur_ns->tx_dropped = cur_es->tx_discards; |
5744 | cur_ns->multicast = cur_es->rx_multicast; | |
5745 | ||
5746 | /* update some more netdev stats if this is main VSI */ | |
5747 | if (vsi->type == ICE_VSI_PF) { | |
5748 | cur_ns->rx_crc_errors = pf->stats.crc_errors; | |
5749 | cur_ns->rx_errors = pf->stats.crc_errors + | |
4f1fe43c BC |
5750 | pf->stats.illegal_bytes + |
5751 | pf->stats.rx_len_errors + | |
5752 | pf->stats.rx_undersize + | |
5753 | pf->hw_csum_rx_error + | |
5754 | pf->stats.rx_jabber + | |
5755 | pf->stats.rx_fragments + | |
5756 | pf->stats.rx_oversize; | |
fcea6f3d | 5757 | cur_ns->rx_length_errors = pf->stats.rx_len_errors; |
56923ab6 BC |
5758 | /* record drops from the port level */ |
5759 | cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; | |
fcea6f3d AV |
5760 | } |
5761 | } | |
5762 | ||
5763 | /** | |
5764 | * ice_update_pf_stats - Update PF port stats counters | |
5765 | * @pf: PF whose stats needs to be updated | |
5766 | */ | |
5a4a8673 | 5767 | void ice_update_pf_stats(struct ice_pf *pf) |
fcea6f3d AV |
5768 | { |
5769 | struct ice_hw_port_stats *prev_ps, *cur_ps; | |
5770 | struct ice_hw *hw = &pf->hw; | |
4ab95646 | 5771 | u16 fd_ctr_base; |
9e7a5d17 | 5772 | u8 port; |
fcea6f3d | 5773 | |
9e7a5d17 | 5774 | port = hw->port_info->lport; |
fcea6f3d AV |
5775 | prev_ps = &pf->stats_prev; |
5776 | cur_ps = &pf->stats; | |
fcea6f3d | 5777 | |
9e7a5d17 | 5778 | ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, |
36517fd3 | 5779 | &prev_ps->eth.rx_bytes, |
fcea6f3d AV |
5780 | &cur_ps->eth.rx_bytes); |
5781 | ||
9e7a5d17 | 5782 | ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, |
36517fd3 | 5783 | &prev_ps->eth.rx_unicast, |
fcea6f3d AV |
5784 | &cur_ps->eth.rx_unicast); |
5785 | ||
9e7a5d17 | 5786 | ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, |
36517fd3 | 5787 | &prev_ps->eth.rx_multicast, |
fcea6f3d AV |
5788 | &cur_ps->eth.rx_multicast); |
5789 | ||
9e7a5d17 | 5790 | ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, |
36517fd3 | 5791 | &prev_ps->eth.rx_broadcast, |
fcea6f3d AV |
5792 | &cur_ps->eth.rx_broadcast); |
5793 | ||
56923ab6 BC |
5794 | ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, |
5795 | &prev_ps->eth.rx_discards, | |
5796 | &cur_ps->eth.rx_discards); | |
5797 | ||
9e7a5d17 | 5798 | ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, |
36517fd3 | 5799 | &prev_ps->eth.tx_bytes, |
fcea6f3d AV |
5800 | &cur_ps->eth.tx_bytes); |
5801 | ||
9e7a5d17 | 5802 | ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, |
36517fd3 | 5803 | &prev_ps->eth.tx_unicast, |
fcea6f3d AV |
5804 | &cur_ps->eth.tx_unicast); |
5805 | ||
9e7a5d17 | 5806 | ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, |
36517fd3 | 5807 | &prev_ps->eth.tx_multicast, |
fcea6f3d AV |
5808 | &cur_ps->eth.tx_multicast); |
5809 | ||
9e7a5d17 | 5810 | ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, |
36517fd3 | 5811 | &prev_ps->eth.tx_broadcast, |
fcea6f3d AV |
5812 | &cur_ps->eth.tx_broadcast); |
5813 | ||
9e7a5d17 | 5814 | ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5815 | &prev_ps->tx_dropped_link_down, |
5816 | &cur_ps->tx_dropped_link_down); | |
5817 | ||
9e7a5d17 | 5818 | ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, |
36517fd3 | 5819 | &prev_ps->rx_size_64, &cur_ps->rx_size_64); |
fcea6f3d | 5820 | |
9e7a5d17 | 5821 | ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, |
36517fd3 | 5822 | &prev_ps->rx_size_127, &cur_ps->rx_size_127); |
fcea6f3d | 5823 | |
9e7a5d17 | 5824 | ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, |
36517fd3 | 5825 | &prev_ps->rx_size_255, &cur_ps->rx_size_255); |
fcea6f3d | 5826 | |
9e7a5d17 | 5827 | ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, |
36517fd3 | 5828 | &prev_ps->rx_size_511, &cur_ps->rx_size_511); |
fcea6f3d | 5829 | |
9e7a5d17 | 5830 | ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5831 | &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); |
5832 | ||
9e7a5d17 | 5833 | ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5834 | &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); |
5835 | ||
9e7a5d17 | 5836 | ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5837 | &prev_ps->rx_size_big, &cur_ps->rx_size_big); |
5838 | ||
9e7a5d17 | 5839 | ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, |
36517fd3 | 5840 | &prev_ps->tx_size_64, &cur_ps->tx_size_64); |
fcea6f3d | 5841 | |
9e7a5d17 | 5842 | ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, |
36517fd3 | 5843 | &prev_ps->tx_size_127, &cur_ps->tx_size_127); |
fcea6f3d | 5844 | |
9e7a5d17 | 5845 | ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, |
36517fd3 | 5846 | &prev_ps->tx_size_255, &cur_ps->tx_size_255); |
fcea6f3d | 5847 | |
9e7a5d17 | 5848 | ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, |
36517fd3 | 5849 | &prev_ps->tx_size_511, &cur_ps->tx_size_511); |
fcea6f3d | 5850 | |
9e7a5d17 | 5851 | ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5852 | &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); |
5853 | ||
9e7a5d17 | 5854 | ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5855 | &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); |
5856 | ||
9e7a5d17 | 5857 | ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5858 | &prev_ps->tx_size_big, &cur_ps->tx_size_big); |
5859 | ||
4ab95646 HT |
5860 | fd_ctr_base = hw->fd_ctr_base; |
5861 | ||
5862 | ice_stat_update40(hw, | |
5863 | GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)), | |
5864 | pf->stat_prev_loaded, &prev_ps->fd_sb_match, | |
5865 | &cur_ps->fd_sb_match); | |
9e7a5d17 | 5866 | ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5867 | &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); |
5868 | ||
9e7a5d17 | 5869 | ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5870 | &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); |
5871 | ||
9e7a5d17 | 5872 | ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5873 | &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); |
5874 | ||
9e7a5d17 | 5875 | ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5876 | &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); |
5877 | ||
4b0fdceb AV |
5878 | ice_update_dcb_stats(pf); |
5879 | ||
9e7a5d17 | 5880 | ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5881 | &prev_ps->crc_errors, &cur_ps->crc_errors); |
5882 | ||
9e7a5d17 | 5883 | ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5884 | &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); |
5885 | ||
9e7a5d17 | 5886 | ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5887 | &prev_ps->mac_local_faults, |
5888 | &cur_ps->mac_local_faults); | |
5889 | ||
9e7a5d17 | 5890 | ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5891 | &prev_ps->mac_remote_faults, |
5892 | &cur_ps->mac_remote_faults); | |
5893 | ||
9e7a5d17 | 5894 | ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5895 | &prev_ps->rx_len_errors, &cur_ps->rx_len_errors); |
5896 | ||
9e7a5d17 | 5897 | ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5898 | &prev_ps->rx_undersize, &cur_ps->rx_undersize); |
5899 | ||
9e7a5d17 | 5900 | ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5901 | &prev_ps->rx_fragments, &cur_ps->rx_fragments); |
5902 | ||
9e7a5d17 | 5903 | ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5904 | &prev_ps->rx_oversize, &cur_ps->rx_oversize); |
5905 | ||
9e7a5d17 | 5906 | ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, |
fcea6f3d AV |
5907 | &prev_ps->rx_jabber, &cur_ps->rx_jabber); |
5908 | ||
4ab95646 HT |
5909 | cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; |
5910 | ||
fcea6f3d AV |
5911 | pf->stat_prev_loaded = true; |
5912 | } | |
5913 | ||
5914 | /** | |
5915 | * ice_get_stats64 - get statistics for network device structure | |
5916 | * @netdev: network interface device structure | |
5917 | * @stats: main device statistics structure | |
5918 | */ | |
5919 | static | |
5920 | void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |
5921 | { | |
5922 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
5923 | struct rtnl_link_stats64 *vsi_stats; | |
5924 | struct ice_vsi *vsi = np->vsi; | |
5925 | ||
5926 | vsi_stats = &vsi->net_stats; | |
5927 | ||
3d57fd10 | 5928 | if (!vsi->num_txq || !vsi->num_rxq) |
fcea6f3d | 5929 | return; |
3d57fd10 | 5930 | |
fcea6f3d AV |
5931 | /* netdev packet/byte stats come from ring counter. These are obtained |
5932 | * by summing up ring counters (done by ice_update_vsi_ring_stats). | |
3d57fd10 DE |
5933 | * But, only call the update routine and read the registers if VSI is |
5934 | * not down. | |
fcea6f3d | 5935 | */ |
e97fb1ae | 5936 | if (!test_bit(ICE_VSI_DOWN, vsi->state)) |
3d57fd10 | 5937 | ice_update_vsi_ring_stats(vsi); |
fcea6f3d AV |
5938 | stats->tx_packets = vsi_stats->tx_packets; |
5939 | stats->tx_bytes = vsi_stats->tx_bytes; | |
5940 | stats->rx_packets = vsi_stats->rx_packets; | |
5941 | stats->rx_bytes = vsi_stats->rx_bytes; | |
5942 | ||
5943 | /* The rest of the stats can be read from the hardware but instead we | |
5944 | * just return values that the watchdog task has already obtained from | |
5945 | * the hardware. | |
5946 | */ | |
5947 | stats->multicast = vsi_stats->multicast; | |
5948 | stats->tx_errors = vsi_stats->tx_errors; | |
5949 | stats->tx_dropped = vsi_stats->tx_dropped; | |
5950 | stats->rx_errors = vsi_stats->rx_errors; | |
5951 | stats->rx_dropped = vsi_stats->rx_dropped; | |
5952 | stats->rx_crc_errors = vsi_stats->rx_crc_errors; | |
5953 | stats->rx_length_errors = vsi_stats->rx_length_errors; | |
5954 | } | |
5955 | ||
2b245cb2 AV |
5956 | /** |
5957 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI | |
5958 | * @vsi: VSI having NAPI disabled | |
5959 | */ | |
5960 | static void ice_napi_disable_all(struct ice_vsi *vsi) | |
5961 | { | |
5962 | int q_idx; | |
5963 | ||
5964 | if (!vsi->netdev) | |
5965 | return; | |
5966 | ||
0c2561c8 | 5967 | ice_for_each_q_vector(vsi, q_idx) { |
eec90376 YX |
5968 | struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; |
5969 | ||
5970 | if (q_vector->rx.ring || q_vector->tx.ring) | |
5971 | napi_disable(&q_vector->napi); | |
cdf1f1f1 JK |
5972 | |
5973 | cancel_work_sync(&q_vector->tx.dim.work); | |
5974 | cancel_work_sync(&q_vector->rx.dim.work); | |
eec90376 | 5975 | } |
2b245cb2 AV |
5976 | } |
5977 | ||
cdedef59 AV |
5978 | /** |
5979 | * ice_down - Shutdown the connection | |
5980 | * @vsi: The VSI being stopped | |
5981 | */ | |
fcea6f3d | 5982 | int ice_down(struct ice_vsi *vsi) |
cdedef59 | 5983 | { |
ab4ab73f | 5984 | int i, tx_err, rx_err, link_err = 0; |
cdedef59 AV |
5985 | |
5986 | /* Caller of this function is expected to set the | |
7e408e07 | 5987 | * vsi->state ICE_DOWN bit |
cdedef59 AV |
5988 | */ |
5989 | if (vsi->netdev) { | |
5990 | netif_carrier_off(vsi->netdev); | |
5991 | netif_tx_disable(vsi->netdev); | |
5992 | } | |
5993 | ||
5994 | ice_vsi_dis_irq(vsi); | |
03f7a986 AV |
5995 | |
5996 | tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); | |
72adf242 | 5997 | if (tx_err) |
19cce2c6 | 5998 | netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", |
72adf242 | 5999 | vsi->vsi_num, tx_err); |
efc2214b MF |
6000 | if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { |
6001 | tx_err = ice_vsi_stop_xdp_tx_rings(vsi); | |
6002 | if (tx_err) | |
19cce2c6 | 6003 | netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", |
efc2214b MF |
6004 | vsi->vsi_num, tx_err); |
6005 | } | |
72adf242 | 6006 | |
13a6233b | 6007 | rx_err = ice_vsi_stop_all_rx_rings(vsi); |
72adf242 | 6008 | if (rx_err) |
19cce2c6 | 6009 | netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n", |
72adf242 AV |
6010 | vsi->vsi_num, rx_err); |
6011 | ||
2b245cb2 | 6012 | ice_napi_disable_all(vsi); |
cdedef59 | 6013 | |
ab4ab73f BA |
6014 | if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { |
6015 | link_err = ice_force_phys_link_state(vsi, false); | |
6016 | if (link_err) | |
19cce2c6 | 6017 | netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", |
ab4ab73f BA |
6018 | vsi->vsi_num, link_err); |
6019 | } | |
b6f934f0 | 6020 | |
cdedef59 AV |
6021 | ice_for_each_txq(vsi, i) |
6022 | ice_clean_tx_ring(vsi->tx_rings[i]); | |
6023 | ||
6024 | ice_for_each_rxq(vsi, i) | |
6025 | ice_clean_rx_ring(vsi->rx_rings[i]); | |
6026 | ||
b6f934f0 | 6027 | if (tx_err || rx_err || link_err) { |
19cce2c6 | 6028 | netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", |
cdedef59 | 6029 | vsi->vsi_num, vsi->vsw->sw_id); |
72adf242 AV |
6030 | return -EIO; |
6031 | } | |
6032 | ||
6033 | return 0; | |
cdedef59 AV |
6034 | } |
6035 | ||
6036 | /** | |
6037 | * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources | |
6038 | * @vsi: VSI having resources allocated | |
6039 | * | |
6040 | * Return 0 on success, negative on failure | |
6041 | */ | |
0e674aeb | 6042 | int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) |
cdedef59 | 6043 | { |
dab0588f | 6044 | int i, err = 0; |
cdedef59 AV |
6045 | |
6046 | if (!vsi->num_txq) { | |
9a946843 | 6047 | dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", |
cdedef59 AV |
6048 | vsi->vsi_num); |
6049 | return -EINVAL; | |
6050 | } | |
6051 | ||
6052 | ice_for_each_txq(vsi, i) { | |
eb0ee8ab MS |
6053 | struct ice_ring *ring = vsi->tx_rings[i]; |
6054 | ||
6055 | if (!ring) | |
6056 | return -EINVAL; | |
6057 | ||
6058 | ring->netdev = vsi->netdev; | |
6059 | err = ice_setup_tx_ring(ring); | |
cdedef59 AV |
6060 | if (err) |
6061 | break; | |
6062 | } | |
6063 | ||
6064 | return err; | |
6065 | } | |
6066 | ||
6067 | /** | |
6068 | * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources | |
6069 | * @vsi: VSI having resources allocated | |
6070 | * | |
6071 | * Return 0 on success, negative on failure | |
6072 | */ | |
0e674aeb | 6073 | int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) |
cdedef59 | 6074 | { |
dab0588f | 6075 | int i, err = 0; |
cdedef59 AV |
6076 | |
6077 | if (!vsi->num_rxq) { | |
9a946843 | 6078 | dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", |
cdedef59 AV |
6079 | vsi->vsi_num); |
6080 | return -EINVAL; | |
6081 | } | |
6082 | ||
6083 | ice_for_each_rxq(vsi, i) { | |
eb0ee8ab MS |
6084 | struct ice_ring *ring = vsi->rx_rings[i]; |
6085 | ||
6086 | if (!ring) | |
6087 | return -EINVAL; | |
6088 | ||
6089 | ring->netdev = vsi->netdev; | |
6090 | err = ice_setup_rx_ring(ring); | |
cdedef59 AV |
6091 | if (err) |
6092 | break; | |
6093 | } | |
6094 | ||
6095 | return err; | |
6096 | } | |
6097 | ||
148beb61 HT |
6098 | /** |
6099 | * ice_vsi_open_ctrl - open control VSI for use | |
6100 | * @vsi: the VSI to open | |
6101 | * | |
6102 | * Initialization of the Control VSI | |
6103 | * | |
6104 | * Returns 0 on success, negative value on error | |
6105 | */ | |
6106 | int ice_vsi_open_ctrl(struct ice_vsi *vsi) | |
6107 | { | |
6108 | char int_name[ICE_INT_NAME_STR_LEN]; | |
6109 | struct ice_pf *pf = vsi->back; | |
6110 | struct device *dev; | |
6111 | int err; | |
6112 | ||
6113 | dev = ice_pf_to_dev(pf); | |
6114 | /* allocate descriptors */ | |
6115 | err = ice_vsi_setup_tx_rings(vsi); | |
6116 | if (err) | |
6117 | goto err_setup_tx; | |
6118 | ||
6119 | err = ice_vsi_setup_rx_rings(vsi); | |
6120 | if (err) | |
6121 | goto err_setup_rx; | |
6122 | ||
6123 | err = ice_vsi_cfg(vsi); | |
6124 | if (err) | |
6125 | goto err_setup_rx; | |
6126 | ||
6127 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", | |
6128 | dev_driver_string(dev), dev_name(dev)); | |
6129 | err = ice_vsi_req_irq_msix(vsi, int_name); | |
6130 | if (err) | |
6131 | goto err_setup_rx; | |
6132 | ||
6133 | ice_vsi_cfg_msix(vsi); | |
6134 | ||
6135 | err = ice_vsi_start_all_rx_rings(vsi); | |
6136 | if (err) | |
6137 | goto err_up_complete; | |
6138 | ||
e97fb1ae | 6139 | clear_bit(ICE_VSI_DOWN, vsi->state); |
148beb61 HT |
6140 | ice_vsi_ena_irq(vsi); |
6141 | ||
6142 | return 0; | |
6143 | ||
6144 | err_up_complete: | |
6145 | ice_down(vsi); | |
6146 | err_setup_rx: | |
6147 | ice_vsi_free_rx_rings(vsi); | |
6148 | err_setup_tx: | |
6149 | ice_vsi_free_tx_rings(vsi); | |
6150 | ||
6151 | return err; | |
6152 | } | |
6153 | ||
cdedef59 AV |
6154 | /** |
6155 | * ice_vsi_open - Called when a network interface is made active | |
6156 | * @vsi: the VSI to open | |
6157 | * | |
6158 | * Initialization of the VSI | |
6159 | * | |
6160 | * Returns 0 on success, negative value on error | |
6161 | */ | |
6162 | static int ice_vsi_open(struct ice_vsi *vsi) | |
6163 | { | |
6164 | char int_name[ICE_INT_NAME_STR_LEN]; | |
6165 | struct ice_pf *pf = vsi->back; | |
6166 | int err; | |
6167 | ||
6168 | /* allocate descriptors */ | |
6169 | err = ice_vsi_setup_tx_rings(vsi); | |
6170 | if (err) | |
6171 | goto err_setup_tx; | |
6172 | ||
6173 | err = ice_vsi_setup_rx_rings(vsi); | |
6174 | if (err) | |
6175 | goto err_setup_rx; | |
6176 | ||
6177 | err = ice_vsi_cfg(vsi); | |
6178 | if (err) | |
6179 | goto err_setup_rx; | |
6180 | ||
6181 | snprintf(int_name, sizeof(int_name) - 1, "%s-%s", | |
4015d11e | 6182 | dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); |
ba880734 | 6183 | err = ice_vsi_req_irq_msix(vsi, int_name); |
cdedef59 AV |
6184 | if (err) |
6185 | goto err_setup_rx; | |
6186 | ||
6187 | /* Notify the stack of the actual queue counts. */ | |
6188 | err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); | |
6189 | if (err) | |
6190 | goto err_set_qs; | |
6191 | ||
6192 | err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); | |
6193 | if (err) | |
6194 | goto err_set_qs; | |
6195 | ||
6196 | err = ice_up_complete(vsi); | |
6197 | if (err) | |
6198 | goto err_up_complete; | |
6199 | ||
6200 | return 0; | |
6201 | ||
6202 | err_up_complete: | |
6203 | ice_down(vsi); | |
6204 | err_set_qs: | |
6205 | ice_vsi_free_irq(vsi); | |
6206 | err_setup_rx: | |
6207 | ice_vsi_free_rx_rings(vsi); | |
6208 | err_setup_tx: | |
6209 | ice_vsi_free_tx_rings(vsi); | |
6210 | ||
6211 | return err; | |
6212 | } | |
6213 | ||
0f9d5027 AV |
6214 | /** |
6215 | * ice_vsi_release_all - Delete all VSIs | |
6216 | * @pf: PF from which all VSIs are being removed | |
6217 | */ | |
6218 | static void ice_vsi_release_all(struct ice_pf *pf) | |
6219 | { | |
6220 | int err, i; | |
6221 | ||
6222 | if (!pf->vsi) | |
6223 | return; | |
6224 | ||
80ed404a | 6225 | ice_for_each_vsi(pf, i) { |
0f9d5027 AV |
6226 | if (!pf->vsi[i]) |
6227 | continue; | |
6228 | ||
6229 | err = ice_vsi_release(pf->vsi[i]); | |
6230 | if (err) | |
19cce2c6 | 6231 | dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", |
0f9d5027 AV |
6232 | i, err, pf->vsi[i]->vsi_num); |
6233 | } | |
6234 | } | |
6235 | ||
0f9d5027 | 6236 | /** |
462acf6a TN |
6237 | * ice_vsi_rebuild_by_type - Rebuild VSI of a given type |
6238 | * @pf: pointer to the PF instance | |
6239 | * @type: VSI type to rebuild | |
6240 | * | |
6241 | * Iterates through the pf->vsi array and rebuilds VSIs of the requested type | |
0f9d5027 | 6242 | */ |
462acf6a | 6243 | static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) |
0f9d5027 | 6244 | { |
4015d11e | 6245 | struct device *dev = ice_pf_to_dev(pf); |
462acf6a TN |
6246 | enum ice_status status; |
6247 | int i, err; | |
0f9d5027 | 6248 | |
80ed404a | 6249 | ice_for_each_vsi(pf, i) { |
4425e053 | 6250 | struct ice_vsi *vsi = pf->vsi[i]; |
0f9d5027 | 6251 | |
462acf6a | 6252 | if (!vsi || vsi->type != type) |
0f9d5027 AV |
6253 | continue; |
6254 | ||
462acf6a | 6255 | /* rebuild the VSI */ |
87324e74 | 6256 | err = ice_vsi_rebuild(vsi, true); |
0f9d5027 | 6257 | if (err) { |
19cce2c6 | 6258 | dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", |
964674f1 | 6259 | err, vsi->idx, ice_vsi_type_str(type)); |
0f9d5027 AV |
6260 | return err; |
6261 | } | |
6262 | ||
462acf6a TN |
6263 | /* replay filters for the VSI */ |
6264 | status = ice_replay_vsi(&pf->hw, vsi->idx); | |
6265 | if (status) { | |
0fee3577 LY |
6266 | dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n", |
6267 | ice_stat_str(status), vsi->idx, | |
6268 | ice_vsi_type_str(type)); | |
462acf6a TN |
6269 | return -EIO; |
6270 | } | |
6271 | ||
6272 | /* Re-map HW VSI number, using VSI handle that has been | |
6273 | * previously validated in ice_replay_vsi() call above | |
6274 | */ | |
6275 | vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); | |
6276 | ||
6277 | /* enable the VSI */ | |
6278 | err = ice_ena_vsi(vsi, false); | |
6279 | if (err) { | |
19cce2c6 | 6280 | dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", |
964674f1 | 6281 | err, vsi->idx, ice_vsi_type_str(type)); |
462acf6a TN |
6282 | return err; |
6283 | } | |
6284 | ||
4015d11e BC |
6285 | dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, |
6286 | ice_vsi_type_str(type)); | |
0f9d5027 AV |
6287 | } |
6288 | ||
6289 | return 0; | |
0b28b702 AV |
6290 | } |
6291 | ||
334cb062 | 6292 | /** |
462acf6a TN |
6293 | * ice_update_pf_netdev_link - Update PF netdev link status |
6294 | * @pf: pointer to the PF instance | |
334cb062 | 6295 | */ |
462acf6a | 6296 | static void ice_update_pf_netdev_link(struct ice_pf *pf) |
334cb062 | 6297 | { |
462acf6a | 6298 | bool link_up; |
334cb062 AV |
6299 | int i; |
6300 | ||
80ed404a | 6301 | ice_for_each_vsi(pf, i) { |
4425e053 KK |
6302 | struct ice_vsi *vsi = pf->vsi[i]; |
6303 | ||
462acf6a TN |
6304 | if (!vsi || vsi->type != ICE_VSI_PF) |
6305 | return; | |
334cb062 | 6306 | |
462acf6a TN |
6307 | ice_get_link_status(pf->vsi[i]->port_info, &link_up); |
6308 | if (link_up) { | |
6309 | netif_carrier_on(pf->vsi[i]->netdev); | |
6310 | netif_tx_wake_all_queues(pf->vsi[i]->netdev); | |
6311 | } else { | |
6312 | netif_carrier_off(pf->vsi[i]->netdev); | |
6313 | netif_tx_stop_all_queues(pf->vsi[i]->netdev); | |
334cb062 | 6314 | } |
334cb062 | 6315 | } |
334cb062 AV |
6316 | } |
6317 | ||
0b28b702 AV |
6318 | /** |
6319 | * ice_rebuild - rebuild after reset | |
2f2da36e | 6320 | * @pf: PF to rebuild |
462acf6a | 6321 | * @reset_type: type of reset |
12bb018c BC |
6322 | * |
6323 | * Do not rebuild VF VSI in this flow because that is already handled via | |
6324 | * ice_reset_all_vfs(). This is because requirements for resetting a VF after a | |
6325 | * PFR/CORER/GLOBER/etc. are different than the normal flow. Also, we don't want | |
6326 | * to reset/rebuild all the VF VSI twice. | |
0b28b702 | 6327 | */ |
462acf6a | 6328 | static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) |
0b28b702 | 6329 | { |
4015d11e | 6330 | struct device *dev = ice_pf_to_dev(pf); |
0b28b702 AV |
6331 | struct ice_hw *hw = &pf->hw; |
6332 | enum ice_status ret; | |
462acf6a | 6333 | int err; |
0b28b702 | 6334 | |
7e408e07 | 6335 | if (test_bit(ICE_DOWN, pf->state)) |
0b28b702 AV |
6336 | goto clear_recovery; |
6337 | ||
462acf6a | 6338 | dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type); |
0b28b702 AV |
6339 | |
6340 | ret = ice_init_all_ctrlq(hw); | |
6341 | if (ret) { | |
0fee3577 LY |
6342 | dev_err(dev, "control queues init failed %s\n", |
6343 | ice_stat_str(ret)); | |
0f9d5027 | 6344 | goto err_init_ctrlq; |
0b28b702 AV |
6345 | } |
6346 | ||
462acf6a TN |
6347 | /* if DDP was previously loaded successfully */ |
6348 | if (!ice_is_safe_mode(pf)) { | |
6349 | /* reload the SW DB of filter tables */ | |
6350 | if (reset_type == ICE_RESET_PFR) | |
6351 | ice_fill_blk_tbls(hw); | |
6352 | else | |
6353 | /* Reload DDP Package after CORER/GLOBR reset */ | |
6354 | ice_load_pkg(NULL, pf); | |
6355 | } | |
6356 | ||
0b28b702 AV |
6357 | ret = ice_clear_pf_cfg(hw); |
6358 | if (ret) { | |
0fee3577 LY |
6359 | dev_err(dev, "clear PF configuration failed %s\n", |
6360 | ice_stat_str(ret)); | |
0f9d5027 | 6361 | goto err_init_ctrlq; |
0b28b702 AV |
6362 | } |
6363 | ||
fc0f39bc | 6364 | if (pf->first_sw->dflt_vsi_ena) |
19cce2c6 | 6365 | dev_info(dev, "Clearing default VSI, re-enable after reset completes\n"); |
fc0f39bc BC |
6366 | /* clear the default VSI configuration if it exists */ |
6367 | pf->first_sw->dflt_vsi = NULL; | |
6368 | pf->first_sw->dflt_vsi_ena = false; | |
6369 | ||
0b28b702 AV |
6370 | ice_clear_pxe_mode(hw); |
6371 | ||
97a4ec01 JK |
6372 | ret = ice_init_nvm(hw); |
6373 | if (ret) { | |
6374 | dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); | |
6375 | goto err_init_ctrlq; | |
6376 | } | |
6377 | ||
0b28b702 AV |
6378 | ret = ice_get_caps(hw); |
6379 | if (ret) { | |
0fee3577 | 6380 | dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); |
0f9d5027 | 6381 | goto err_init_ctrlq; |
0b28b702 AV |
6382 | } |
6383 | ||
42449105 AV |
6384 | ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); |
6385 | if (ret) { | |
6386 | dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret)); | |
6387 | goto err_init_ctrlq; | |
6388 | } | |
6389 | ||
0f9d5027 AV |
6390 | err = ice_sched_init_port(hw->port_info); |
6391 | if (err) | |
6392 | goto err_sched_init_port; | |
6393 | ||
0b28b702 | 6394 | /* start misc vector */ |
ba880734 BC |
6395 | err = ice_req_irq_msix_misc(pf); |
6396 | if (err) { | |
6397 | dev_err(dev, "misc vector setup failed: %d\n", err); | |
462acf6a | 6398 | goto err_sched_init_port; |
0b28b702 AV |
6399 | } |
6400 | ||
83af0039 HT |
6401 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { |
6402 | wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); | |
6403 | if (!rd32(hw, PFQF_FD_SIZE)) { | |
6404 | u16 unused, guar, b_effort; | |
6405 | ||
6406 | guar = hw->func_caps.fd_fltr_guar; | |
6407 | b_effort = hw->func_caps.fd_fltr_best_effort; | |
6408 | ||
6409 | /* force guaranteed filter pool for PF */ | |
6410 | ice_alloc_fd_guar_item(hw, &unused, guar); | |
6411 | /* force shared filter pool for PF */ | |
6412 | ice_alloc_fd_shrd_item(hw, &unused, b_effort); | |
6413 | } | |
6414 | } | |
6415 | ||
462acf6a TN |
6416 | if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) |
6417 | ice_dcb_rebuild(pf); | |
6418 | ||
06c16d89 JK |
6419 | /* If the PF previously had enabled PTP, PTP init needs to happen before |
6420 | * the VSI rebuild. If not, this causes the PTP link status events to | |
6421 | * fail. | |
6422 | */ | |
6423 | if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) | |
6424 | ice_ptp_init(pf); | |
6425 | ||
462acf6a TN |
6426 | /* rebuild PF VSI */ |
6427 | err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); | |
0f9d5027 | 6428 | if (err) { |
462acf6a | 6429 | dev_err(dev, "PF VSI rebuild failed: %d\n", err); |
0f9d5027 AV |
6430 | goto err_vsi_rebuild; |
6431 | } | |
0b28b702 | 6432 | |
83af0039 HT |
6433 | /* If Flow Director is active */ |
6434 | if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { | |
6435 | err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL); | |
6436 | if (err) { | |
6437 | dev_err(dev, "control VSI rebuild failed: %d\n", err); | |
6438 | goto err_vsi_rebuild; | |
6439 | } | |
6440 | ||
6441 | /* replay HW Flow Director recipes */ | |
6442 | if (hw->fdir_prof) | |
6443 | ice_fdir_replay_flows(hw); | |
6444 | ||
6445 | /* replay Flow Director filters */ | |
6446 | ice_fdir_replay_fltrs(pf); | |
28bf2672 BC |
6447 | |
6448 | ice_rebuild_arfs(pf); | |
83af0039 HT |
6449 | } |
6450 | ||
462acf6a TN |
6451 | ice_update_pf_netdev_link(pf); |
6452 | ||
6453 | /* tell the firmware we are up */ | |
6454 | ret = ice_send_version(pf); | |
6455 | if (ret) { | |
0fee3577 LY |
6456 | dev_err(dev, "Rebuild failed due to error sending driver version: %s\n", |
6457 | ice_stat_str(ret)); | |
462acf6a TN |
6458 | goto err_vsi_rebuild; |
6459 | } | |
6460 | ||
6461 | ice_replay_post(hw); | |
6462 | ||
0f9d5027 | 6463 | /* if we get here, reset flow is successful */ |
7e408e07 | 6464 | clear_bit(ICE_RESET_FAILED, pf->state); |
f9f5301e DE |
6465 | |
6466 | ice_plug_aux_dev(pf); | |
0b28b702 AV |
6467 | return; |
6468 | ||
0f9d5027 | 6469 | err_vsi_rebuild: |
0f9d5027 AV |
6470 | err_sched_init_port: |
6471 | ice_sched_cleanup_all(hw); | |
6472 | err_init_ctrlq: | |
0b28b702 | 6473 | ice_shutdown_all_ctrlq(hw); |
7e408e07 | 6474 | set_bit(ICE_RESET_FAILED, pf->state); |
0b28b702 | 6475 | clear_recovery: |
0f9d5027 | 6476 | /* set this bit in PF state to control service task scheduling */ |
7e408e07 | 6477 | set_bit(ICE_NEEDS_RESTART, pf->state); |
0f9d5027 | 6478 | dev_err(dev, "Rebuild failed, unload and reload driver\n"); |
0b28b702 AV |
6479 | } |
6480 | ||
23b44513 MF |
6481 | /** |
6482 | * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP | |
6483 | * @vsi: Pointer to VSI structure | |
6484 | */ | |
6485 | static int ice_max_xdp_frame_size(struct ice_vsi *vsi) | |
6486 | { | |
6487 | if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) | |
6488 | return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM; | |
6489 | else | |
6490 | return ICE_RXBUF_3072; | |
6491 | } | |
6492 | ||
e94d4478 AV |
6493 | /** |
6494 | * ice_change_mtu - NDO callback to change the MTU | |
6495 | * @netdev: network interface device structure | |
6496 | * @new_mtu: new value for maximum frame size | |
6497 | * | |
6498 | * Returns 0 on success, negative on failure | |
6499 | */ | |
6500 | static int ice_change_mtu(struct net_device *netdev, int new_mtu) | |
6501 | { | |
6502 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
6503 | struct ice_vsi *vsi = np->vsi; | |
6504 | struct ice_pf *pf = vsi->back; | |
348048e7 | 6505 | struct iidc_event *event; |
e94d4478 | 6506 | u8 count = 0; |
348048e7 | 6507 | int err = 0; |
e94d4478 | 6508 | |
22bef5e7 | 6509 | if (new_mtu == (int)netdev->mtu) { |
2f2da36e | 6510 | netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); |
e94d4478 AV |
6511 | return 0; |
6512 | } | |
6513 | ||
efc2214b | 6514 | if (ice_is_xdp_ena_vsi(vsi)) { |
23b44513 | 6515 | int frame_size = ice_max_xdp_frame_size(vsi); |
efc2214b MF |
6516 | |
6517 | if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) { | |
6518 | netdev_err(netdev, "max MTU for XDP usage is %d\n", | |
23b44513 | 6519 | frame_size - ICE_ETH_PKT_HDR_PAD); |
efc2214b MF |
6520 | return -EINVAL; |
6521 | } | |
6522 | } | |
6523 | ||
e94d4478 AV |
6524 | /* if a reset is in progress, wait for some time for it to complete */ |
6525 | do { | |
5df7e45d | 6526 | if (ice_is_reset_in_progress(pf->state)) { |
e94d4478 AV |
6527 | count++; |
6528 | usleep_range(1000, 2000); | |
6529 | } else { | |
6530 | break; | |
6531 | } | |
6532 | ||
6533 | } while (count < 100); | |
6534 | ||
6535 | if (count == 100) { | |
2f2da36e | 6536 | netdev_err(netdev, "can't change MTU. Device is busy\n"); |
e94d4478 AV |
6537 | return -EBUSY; |
6538 | } | |
6539 | ||
348048e7 DE |
6540 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
6541 | if (!event) | |
6542 | return -ENOMEM; | |
6543 | ||
6544 | set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); | |
6545 | ice_send_event_to_aux(pf, event); | |
6546 | clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type); | |
6547 | ||
22bef5e7 | 6548 | netdev->mtu = (unsigned int)new_mtu; |
e94d4478 AV |
6549 | |
6550 | /* if VSI is up, bring it down and then back up */ | |
e97fb1ae | 6551 | if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { |
e94d4478 AV |
6552 | err = ice_down(vsi); |
6553 | if (err) { | |
fe6cd890 | 6554 | netdev_err(netdev, "change MTU if_down err %d\n", err); |
348048e7 | 6555 | goto event_after; |
e94d4478 AV |
6556 | } |
6557 | ||
6558 | err = ice_up(vsi); | |
6559 | if (err) { | |
2f2da36e | 6560 | netdev_err(netdev, "change MTU if_up err %d\n", err); |
348048e7 | 6561 | goto event_after; |
e94d4478 AV |
6562 | } |
6563 | } | |
6564 | ||
bda5b7db | 6565 | netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); |
348048e7 DE |
6566 | event_after: |
6567 | set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); | |
6568 | ice_send_event_to_aux(pf, event); | |
6569 | kfree(event); | |
6570 | ||
6571 | return err; | |
e94d4478 AV |
6572 | } |
6573 | ||
77a78115 | 6574 | /** |
a7605370 | 6575 | * ice_eth_ioctl - Access the hwtstamp interface |
77a78115 JK |
6576 | * @netdev: network interface device structure |
6577 | * @ifr: interface request data | |
6578 | * @cmd: ioctl command | |
6579 | */ | |
a7605370 | 6580 | static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
77a78115 JK |
6581 | { |
6582 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
6583 | struct ice_pf *pf = np->vsi->back; | |
6584 | ||
6585 | switch (cmd) { | |
6586 | case SIOCGHWTSTAMP: | |
6587 | return ice_ptp_get_ts_config(pf, ifr); | |
6588 | case SIOCSHWTSTAMP: | |
6589 | return ice_ptp_set_ts_config(pf, ifr); | |
6590 | default: | |
6591 | return -EOPNOTSUPP; | |
6592 | } | |
6593 | } | |
6594 | ||
0fee3577 LY |
6595 | /** |
6596 | * ice_aq_str - convert AQ err code to a string | |
6597 | * @aq_err: the AQ error code to convert | |
6598 | */ | |
6599 | const char *ice_aq_str(enum ice_aq_err aq_err) | |
6600 | { | |
6601 | switch (aq_err) { | |
6602 | case ICE_AQ_RC_OK: | |
6603 | return "OK"; | |
6604 | case ICE_AQ_RC_EPERM: | |
6605 | return "ICE_AQ_RC_EPERM"; | |
6606 | case ICE_AQ_RC_ENOENT: | |
6607 | return "ICE_AQ_RC_ENOENT"; | |
6608 | case ICE_AQ_RC_ENOMEM: | |
6609 | return "ICE_AQ_RC_ENOMEM"; | |
6610 | case ICE_AQ_RC_EBUSY: | |
6611 | return "ICE_AQ_RC_EBUSY"; | |
6612 | case ICE_AQ_RC_EEXIST: | |
6613 | return "ICE_AQ_RC_EEXIST"; | |
6614 | case ICE_AQ_RC_EINVAL: | |
6615 | return "ICE_AQ_RC_EINVAL"; | |
6616 | case ICE_AQ_RC_ENOSPC: | |
6617 | return "ICE_AQ_RC_ENOSPC"; | |
6618 | case ICE_AQ_RC_ENOSYS: | |
6619 | return "ICE_AQ_RC_ENOSYS"; | |
b5e19a64 CC |
6620 | case ICE_AQ_RC_EMODE: |
6621 | return "ICE_AQ_RC_EMODE"; | |
0fee3577 LY |
6622 | case ICE_AQ_RC_ENOSEC: |
6623 | return "ICE_AQ_RC_ENOSEC"; | |
6624 | case ICE_AQ_RC_EBADSIG: | |
6625 | return "ICE_AQ_RC_EBADSIG"; | |
6626 | case ICE_AQ_RC_ESVN: | |
6627 | return "ICE_AQ_RC_ESVN"; | |
6628 | case ICE_AQ_RC_EBADMAN: | |
6629 | return "ICE_AQ_RC_EBADMAN"; | |
6630 | case ICE_AQ_RC_EBADBUF: | |
6631 | return "ICE_AQ_RC_EBADBUF"; | |
6632 | } | |
6633 | ||
6634 | return "ICE_AQ_RC_UNKNOWN"; | |
6635 | } | |
6636 | ||
6637 | /** | |
6638 | * ice_stat_str - convert status err code to a string | |
6639 | * @stat_err: the status error code to convert | |
6640 | */ | |
6641 | const char *ice_stat_str(enum ice_status stat_err) | |
6642 | { | |
6643 | switch (stat_err) { | |
6644 | case ICE_SUCCESS: | |
6645 | return "OK"; | |
6646 | case ICE_ERR_PARAM: | |
6647 | return "ICE_ERR_PARAM"; | |
6648 | case ICE_ERR_NOT_IMPL: | |
6649 | return "ICE_ERR_NOT_IMPL"; | |
6650 | case ICE_ERR_NOT_READY: | |
6651 | return "ICE_ERR_NOT_READY"; | |
6652 | case ICE_ERR_NOT_SUPPORTED: | |
6653 | return "ICE_ERR_NOT_SUPPORTED"; | |
6654 | case ICE_ERR_BAD_PTR: | |
6655 | return "ICE_ERR_BAD_PTR"; | |
6656 | case ICE_ERR_INVAL_SIZE: | |
6657 | return "ICE_ERR_INVAL_SIZE"; | |
6658 | case ICE_ERR_DEVICE_NOT_SUPPORTED: | |
6659 | return "ICE_ERR_DEVICE_NOT_SUPPORTED"; | |
6660 | case ICE_ERR_RESET_FAILED: | |
6661 | return "ICE_ERR_RESET_FAILED"; | |
6662 | case ICE_ERR_FW_API_VER: | |
6663 | return "ICE_ERR_FW_API_VER"; | |
6664 | case ICE_ERR_NO_MEMORY: | |
6665 | return "ICE_ERR_NO_MEMORY"; | |
6666 | case ICE_ERR_CFG: | |
6667 | return "ICE_ERR_CFG"; | |
6668 | case ICE_ERR_OUT_OF_RANGE: | |
6669 | return "ICE_ERR_OUT_OF_RANGE"; | |
6670 | case ICE_ERR_ALREADY_EXISTS: | |
6671 | return "ICE_ERR_ALREADY_EXISTS"; | |
e120a9ab JK |
6672 | case ICE_ERR_NVM: |
6673 | return "ICE_ERR_NVM"; | |
0fee3577 LY |
6674 | case ICE_ERR_NVM_CHECKSUM: |
6675 | return "ICE_ERR_NVM_CHECKSUM"; | |
6676 | case ICE_ERR_BUF_TOO_SHORT: | |
6677 | return "ICE_ERR_BUF_TOO_SHORT"; | |
6678 | case ICE_ERR_NVM_BLANK_MODE: | |
6679 | return "ICE_ERR_NVM_BLANK_MODE"; | |
6680 | case ICE_ERR_IN_USE: | |
6681 | return "ICE_ERR_IN_USE"; | |
6682 | case ICE_ERR_MAX_LIMIT: | |
6683 | return "ICE_ERR_MAX_LIMIT"; | |
6684 | case ICE_ERR_RESET_ONGOING: | |
6685 | return "ICE_ERR_RESET_ONGOING"; | |
6686 | case ICE_ERR_HW_TABLE: | |
6687 | return "ICE_ERR_HW_TABLE"; | |
6688 | case ICE_ERR_DOES_NOT_EXIST: | |
6689 | return "ICE_ERR_DOES_NOT_EXIST"; | |
b8272919 VR |
6690 | case ICE_ERR_FW_DDP_MISMATCH: |
6691 | return "ICE_ERR_FW_DDP_MISMATCH"; | |
0fee3577 LY |
6692 | case ICE_ERR_AQ_ERROR: |
6693 | return "ICE_ERR_AQ_ERROR"; | |
6694 | case ICE_ERR_AQ_TIMEOUT: | |
6695 | return "ICE_ERR_AQ_TIMEOUT"; | |
6696 | case ICE_ERR_AQ_FULL: | |
6697 | return "ICE_ERR_AQ_FULL"; | |
6698 | case ICE_ERR_AQ_NO_WORK: | |
6699 | return "ICE_ERR_AQ_NO_WORK"; | |
6700 | case ICE_ERR_AQ_EMPTY: | |
6701 | return "ICE_ERR_AQ_EMPTY"; | |
b5c7f857 ES |
6702 | case ICE_ERR_AQ_FW_CRITICAL: |
6703 | return "ICE_ERR_AQ_FW_CRITICAL"; | |
0fee3577 LY |
6704 | } |
6705 | ||
6706 | return "ICE_ERR_UNKNOWN"; | |
6707 | } | |
6708 | ||
d76a60ba | 6709 | /** |
b66a972a | 6710 | * ice_set_rss_lut - Set RSS LUT |
d76a60ba | 6711 | * @vsi: Pointer to VSI structure |
d76a60ba AV |
6712 | * @lut: Lookup table |
6713 | * @lut_size: Lookup table size | |
6714 | * | |
6715 | * Returns 0 on success, negative on failure | |
6716 | */ | |
b66a972a | 6717 | int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) |
d76a60ba | 6718 | { |
b66a972a BC |
6719 | struct ice_aq_get_set_rss_lut_params params = {}; |
6720 | struct ice_hw *hw = &vsi->back->hw; | |
d76a60ba AV |
6721 | enum ice_status status; |
6722 | ||
b66a972a BC |
6723 | if (!lut) |
6724 | return -EINVAL; | |
d76a60ba | 6725 | |
b66a972a BC |
6726 | params.vsi_handle = vsi->idx; |
6727 | params.lut_size = lut_size; | |
6728 | params.lut_type = vsi->rss_lut_type; | |
6729 | params.lut = lut; | |
d76a60ba | 6730 | |
b66a972a BC |
6731 | status = ice_aq_set_rss_lut(hw, ¶ms); |
6732 | if (status) { | |
6733 | dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n", | |
6734 | ice_stat_str(status), | |
6735 | ice_aq_str(hw->adminq.sq_last_status)); | |
6736 | return -EIO; | |
d76a60ba AV |
6737 | } |
6738 | ||
b66a972a BC |
6739 | return 0; |
6740 | } | |
e3c53928 | 6741 | |
b66a972a BC |
6742 | /** |
6743 | * ice_set_rss_key - Set RSS key | |
6744 | * @vsi: Pointer to the VSI structure | |
6745 | * @seed: RSS hash seed | |
6746 | * | |
6747 | * Returns 0 on success, negative on failure | |
6748 | */ | |
6749 | int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed) | |
6750 | { | |
6751 | struct ice_hw *hw = &vsi->back->hw; | |
6752 | enum ice_status status; | |
6753 | ||
6754 | if (!seed) | |
6755 | return -EINVAL; | |
6756 | ||
6757 | status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); | |
6758 | if (status) { | |
6759 | dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n", | |
6760 | ice_stat_str(status), | |
6761 | ice_aq_str(hw->adminq.sq_last_status)); | |
6762 | return -EIO; | |
d76a60ba AV |
6763 | } |
6764 | ||
6765 | return 0; | |
6766 | } | |
6767 | ||
6768 | /** | |
b66a972a | 6769 | * ice_get_rss_lut - Get RSS LUT |
d76a60ba | 6770 | * @vsi: Pointer to VSI structure |
d76a60ba AV |
6771 | * @lut: Buffer to store the lookup table entries |
6772 | * @lut_size: Size of buffer to store the lookup table entries | |
6773 | * | |
6774 | * Returns 0 on success, negative on failure | |
6775 | */ | |
b66a972a | 6776 | int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size) |
d76a60ba | 6777 | { |
b66a972a BC |
6778 | struct ice_aq_get_set_rss_lut_params params = {}; |
6779 | struct ice_hw *hw = &vsi->back->hw; | |
d76a60ba AV |
6780 | enum ice_status status; |
6781 | ||
b66a972a BC |
6782 | if (!lut) |
6783 | return -EINVAL; | |
d76a60ba | 6784 | |
b66a972a BC |
6785 | params.vsi_handle = vsi->idx; |
6786 | params.lut_size = lut_size; | |
6787 | params.lut_type = vsi->rss_lut_type; | |
6788 | params.lut = lut; | |
6789 | ||
6790 | status = ice_aq_get_rss_lut(hw, ¶ms); | |
6791 | if (status) { | |
6792 | dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n", | |
6793 | ice_stat_str(status), | |
6794 | ice_aq_str(hw->adminq.sq_last_status)); | |
6795 | return -EIO; | |
d76a60ba AV |
6796 | } |
6797 | ||
b66a972a BC |
6798 | return 0; |
6799 | } | |
e3c53928 | 6800 | |
b66a972a BC |
6801 | /** |
6802 | * ice_get_rss_key - Get RSS key | |
6803 | * @vsi: Pointer to VSI structure | |
6804 | * @seed: Buffer to store the key in | |
6805 | * | |
6806 | * Returns 0 on success, negative on failure | |
6807 | */ | |
6808 | int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed) | |
6809 | { | |
6810 | struct ice_hw *hw = &vsi->back->hw; | |
6811 | enum ice_status status; | |
6812 | ||
6813 | if (!seed) | |
6814 | return -EINVAL; | |
6815 | ||
6816 | status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); | |
6817 | if (status) { | |
6818 | dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n", | |
6819 | ice_stat_str(status), | |
6820 | ice_aq_str(hw->adminq.sq_last_status)); | |
6821 | return -EIO; | |
d76a60ba AV |
6822 | } |
6823 | ||
6824 | return 0; | |
6825 | } | |
6826 | ||
b1edc14a MFIP |
6827 | /** |
6828 | * ice_bridge_getlink - Get the hardware bridge mode | |
6829 | * @skb: skb buff | |
f9867df6 | 6830 | * @pid: process ID |
b1edc14a MFIP |
6831 | * @seq: RTNL message seq |
6832 | * @dev: the netdev being configured | |
6833 | * @filter_mask: filter mask passed in | |
6834 | * @nlflags: netlink flags passed in | |
6835 | * | |
6836 | * Return the bridge mode (VEB/VEPA) | |
6837 | */ | |
6838 | static int | |
6839 | ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |
6840 | struct net_device *dev, u32 filter_mask, int nlflags) | |
6841 | { | |
6842 | struct ice_netdev_priv *np = netdev_priv(dev); | |
6843 | struct ice_vsi *vsi = np->vsi; | |
6844 | struct ice_pf *pf = vsi->back; | |
6845 | u16 bmode; | |
6846 | ||
6847 | bmode = pf->first_sw->bridge_mode; | |
6848 | ||
6849 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, | |
6850 | filter_mask, NULL); | |
6851 | } | |
6852 | ||
6853 | /** | |
6854 | * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) | |
6855 | * @vsi: Pointer to VSI structure | |
6856 | * @bmode: Hardware bridge mode (VEB/VEPA) | |
6857 | * | |
6858 | * Returns 0 on success, negative on failure | |
6859 | */ | |
6860 | static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) | |
6861 | { | |
b1edc14a MFIP |
6862 | struct ice_aqc_vsi_props *vsi_props; |
6863 | struct ice_hw *hw = &vsi->back->hw; | |
198a666a | 6864 | struct ice_vsi_ctx *ctxt; |
b1edc14a | 6865 | enum ice_status status; |
198a666a | 6866 | int ret = 0; |
b1edc14a MFIP |
6867 | |
6868 | vsi_props = &vsi->info; | |
198a666a | 6869 | |
9efe35d0 | 6870 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
198a666a BA |
6871 | if (!ctxt) |
6872 | return -ENOMEM; | |
6873 | ||
6874 | ctxt->info = vsi->info; | |
b1edc14a MFIP |
6875 | |
6876 | if (bmode == BRIDGE_MODE_VEB) | |
6877 | /* change from VEPA to VEB mode */ | |
198a666a | 6878 | ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
b1edc14a MFIP |
6879 | else |
6880 | /* change from VEB to VEPA mode */ | |
198a666a BA |
6881 | ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; |
6882 | ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); | |
5726ca0e | 6883 | |
198a666a | 6884 | status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); |
b1edc14a | 6885 | if (status) { |
0fee3577 LY |
6886 | dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n", |
6887 | bmode, ice_stat_str(status), | |
6888 | ice_aq_str(hw->adminq.sq_last_status)); | |
198a666a BA |
6889 | ret = -EIO; |
6890 | goto out; | |
b1edc14a MFIP |
6891 | } |
6892 | /* Update sw flags for book keeping */ | |
198a666a | 6893 | vsi_props->sw_flags = ctxt->info.sw_flags; |
b1edc14a | 6894 | |
198a666a | 6895 | out: |
9efe35d0 | 6896 | kfree(ctxt); |
198a666a | 6897 | return ret; |
b1edc14a MFIP |
6898 | } |
6899 | ||
6900 | /** | |
6901 | * ice_bridge_setlink - Set the hardware bridge mode | |
6902 | * @dev: the netdev being configured | |
6903 | * @nlh: RTNL message | |
6904 | * @flags: bridge setlink flags | |
2fd527b7 | 6905 | * @extack: netlink extended ack |
b1edc14a MFIP |
6906 | * |
6907 | * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is | |
6908 | * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if | |
6909 | * not already set for all VSIs connected to this switch. And also update the | |
6910 | * unicast switch filter rules for the corresponding switch of the netdev. | |
6911 | */ | |
6912 | static int | |
6913 | ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, | |
3d505147 BA |
6914 | u16 __always_unused flags, |
6915 | struct netlink_ext_ack __always_unused *extack) | |
b1edc14a MFIP |
6916 | { |
6917 | struct ice_netdev_priv *np = netdev_priv(dev); | |
6918 | struct ice_pf *pf = np->vsi->back; | |
6919 | struct nlattr *attr, *br_spec; | |
6920 | struct ice_hw *hw = &pf->hw; | |
6921 | enum ice_status status; | |
6922 | struct ice_sw *pf_sw; | |
6923 | int rem, v, err = 0; | |
6924 | ||
6925 | pf_sw = pf->first_sw; | |
6926 | /* find the attribute in the netlink message */ | |
6927 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); | |
6928 | ||
6929 | nla_for_each_nested(attr, br_spec, rem) { | |
6930 | __u16 mode; | |
6931 | ||
6932 | if (nla_type(attr) != IFLA_BRIDGE_MODE) | |
6933 | continue; | |
6934 | mode = nla_get_u16(attr); | |
6935 | if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) | |
6936 | return -EINVAL; | |
6937 | /* Continue if bridge mode is not being flipped */ | |
6938 | if (mode == pf_sw->bridge_mode) | |
6939 | continue; | |
6940 | /* Iterates through the PF VSI list and update the loopback | |
6941 | * mode of the VSI | |
6942 | */ | |
6943 | ice_for_each_vsi(pf, v) { | |
6944 | if (!pf->vsi[v]) | |
6945 | continue; | |
6946 | err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); | |
6947 | if (err) | |
6948 | return err; | |
6949 | } | |
6950 | ||
6951 | hw->evb_veb = (mode == BRIDGE_MODE_VEB); | |
6952 | /* Update the unicast switch filter rules for the corresponding | |
6953 | * switch of the netdev | |
6954 | */ | |
6955 | status = ice_update_sw_rule_bridge_mode(hw); | |
6956 | if (status) { | |
0fee3577 LY |
6957 | netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n", |
6958 | mode, ice_stat_str(status), | |
6959 | ice_aq_str(hw->adminq.sq_last_status)); | |
b1edc14a MFIP |
6960 | /* revert hw->evb_veb */ |
6961 | hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); | |
6962 | return -EIO; | |
6963 | } | |
6964 | ||
6965 | pf_sw->bridge_mode = mode; | |
6966 | } | |
6967 | ||
6968 | return 0; | |
6969 | } | |
6970 | ||
b3969fd7 SM |
6971 | /** |
6972 | * ice_tx_timeout - Respond to a Tx Hang | |
6973 | * @netdev: network interface device structure | |
644f40ea | 6974 | * @txqueue: Tx queue |
b3969fd7 | 6975 | */ |
0290bd29 | 6976 | static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
b3969fd7 SM |
6977 | { |
6978 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
6979 | struct ice_ring *tx_ring = NULL; | |
6980 | struct ice_vsi *vsi = np->vsi; | |
6981 | struct ice_pf *pf = vsi->back; | |
807bc98d | 6982 | u32 i; |
b3969fd7 SM |
6983 | |
6984 | pf->tx_timeout_count++; | |
6985 | ||
610ed0e9 AJ |
6986 | /* Check if PFC is enabled for the TC to which the queue belongs |
6987 | * to. If yes then Tx timeout is not caused by a hung queue, no | |
6988 | * need to reset and rebuild | |
6989 | */ | |
6990 | if (ice_is_pfc_causing_hung_q(pf, txqueue)) { | |
6991 | dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n", | |
6992 | txqueue); | |
6993 | return; | |
6994 | } | |
6995 | ||
ed5a3f66 JF |
6996 | /* now that we have an index, find the tx_ring struct */ |
6997 | for (i = 0; i < vsi->num_txq; i++) | |
6998 | if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) | |
6999 | if (txqueue == vsi->tx_rings[i]->q_index) { | |
7000 | tx_ring = vsi->tx_rings[i]; | |
7001 | break; | |
7002 | } | |
b3969fd7 SM |
7003 | |
7004 | /* Reset recovery level if enough time has elapsed after last timeout. | |
7005 | * Also ensure no new reset action happens before next timeout period. | |
7006 | */ | |
7007 | if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) | |
7008 | pf->tx_timeout_recovery_level = 1; | |
7009 | else if (time_before(jiffies, (pf->tx_timeout_last_recovery + | |
7010 | netdev->watchdog_timeo))) | |
7011 | return; | |
7012 | ||
7013 | if (tx_ring) { | |
807bc98d BC |
7014 | struct ice_hw *hw = &pf->hw; |
7015 | u32 head, val = 0; | |
7016 | ||
ed5a3f66 | 7017 | head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) & |
807bc98d | 7018 | QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; |
b3969fd7 | 7019 | /* Read interrupt register */ |
ba880734 | 7020 | val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); |
b3969fd7 | 7021 | |
93ff4858 | 7022 | netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", |
ed5a3f66 | 7023 | vsi->vsi_num, txqueue, tx_ring->next_to_clean, |
807bc98d | 7024 | head, tx_ring->next_to_use, val); |
b3969fd7 SM |
7025 | } |
7026 | ||
7027 | pf->tx_timeout_last_recovery = jiffies; | |
93ff4858 | 7028 | netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n", |
ed5a3f66 | 7029 | pf->tx_timeout_recovery_level, txqueue); |
b3969fd7 SM |
7030 | |
7031 | switch (pf->tx_timeout_recovery_level) { | |
7032 | case 1: | |
7e408e07 | 7033 | set_bit(ICE_PFR_REQ, pf->state); |
b3969fd7 SM |
7034 | break; |
7035 | case 2: | |
7e408e07 | 7036 | set_bit(ICE_CORER_REQ, pf->state); |
b3969fd7 SM |
7037 | break; |
7038 | case 3: | |
7e408e07 | 7039 | set_bit(ICE_GLOBR_REQ, pf->state); |
b3969fd7 SM |
7040 | break; |
7041 | default: | |
7042 | netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); | |
7e408e07 | 7043 | set_bit(ICE_DOWN, pf->state); |
e97fb1ae | 7044 | set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); |
7e408e07 | 7045 | set_bit(ICE_SERVICE_DIS, pf->state); |
b3969fd7 SM |
7046 | break; |
7047 | } | |
7048 | ||
7049 | ice_service_task_schedule(pf); | |
7050 | pf->tx_timeout_recovery_level++; | |
7051 | } | |
7052 | ||
cdedef59 AV |
7053 | /** |
7054 | * ice_open - Called when a network interface becomes active | |
7055 | * @netdev: network interface device structure | |
7056 | * | |
7057 | * The open entry point is called when a network interface is made | |
df17b7e0 | 7058 | * active by the system (IFF_UP). At this point all resources needed |
cdedef59 AV |
7059 | * for transmit and receive operations are allocated, the interrupt |
7060 | * handler is registered with the OS, the netdev watchdog is enabled, | |
7061 | * and the stack is notified that the interface is ready. | |
7062 | * | |
7063 | * Returns 0 on success, negative value on failure | |
7064 | */ | |
0e674aeb | 7065 | int ice_open(struct net_device *netdev) |
e95fc857 KG |
7066 | { |
7067 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
7068 | struct ice_pf *pf = np->vsi->back; | |
7069 | ||
7070 | if (ice_is_reset_in_progress(pf->state)) { | |
7071 | netdev_err(netdev, "can't open net device while reset is in progress"); | |
7072 | return -EBUSY; | |
7073 | } | |
7074 | ||
7075 | return ice_open_internal(netdev); | |
7076 | } | |
7077 | ||
7078 | /** | |
7079 | * ice_open_internal - Called when a network interface becomes active | |
7080 | * @netdev: network interface device structure | |
7081 | * | |
7082 | * Internal ice_open implementation. Should not be used directly except for ice_open and reset | |
7083 | * handling routine | |
7084 | * | |
7085 | * Returns 0 on success, negative value on failure | |
7086 | */ | |
7087 | int ice_open_internal(struct net_device *netdev) | |
cdedef59 AV |
7088 | { |
7089 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
7090 | struct ice_vsi *vsi = np->vsi; | |
de75135b | 7091 | struct ice_pf *pf = vsi->back; |
6d599946 | 7092 | struct ice_port_info *pi; |
d348d517 | 7093 | enum ice_status status; |
cdedef59 AV |
7094 | int err; |
7095 | ||
7e408e07 | 7096 | if (test_bit(ICE_NEEDS_RESTART, pf->state)) { |
0f9d5027 AV |
7097 | netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); |
7098 | return -EIO; | |
7099 | } | |
7100 | ||
cdedef59 AV |
7101 | netif_carrier_off(netdev); |
7102 | ||
6d599946 | 7103 | pi = vsi->port_info; |
d348d517 AV |
7104 | status = ice_update_link_info(pi); |
7105 | if (status) { | |
7106 | netdev_err(netdev, "Failed to get link info, error %s\n", | |
7107 | ice_stat_str(status)); | |
7108 | return -EIO; | |
b6f934f0 | 7109 | } |
cdedef59 | 7110 | |
c77849f5 AV |
7111 | ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); |
7112 | ||
6d599946 TN |
7113 | /* Set PHY if there is media, otherwise, turn off PHY */ |
7114 | if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { | |
1a3571b5 | 7115 | clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
7e408e07 | 7116 | if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { |
1a3571b5 PG |
7117 | err = ice_init_phy_user_cfg(pi); |
7118 | if (err) { | |
7119 | netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", | |
7120 | err); | |
7121 | return err; | |
7122 | } | |
7123 | } | |
7124 | ||
7125 | err = ice_configure_phy(vsi); | |
6d599946 | 7126 | if (err) { |
19cce2c6 | 7127 | netdev_err(netdev, "Failed to set physical link up, error %d\n", |
6d599946 TN |
7128 | err); |
7129 | return err; | |
7130 | } | |
7131 | } else { | |
1a3571b5 | 7132 | set_bit(ICE_FLAG_NO_MEDIA, pf->flags); |
d348d517 | 7133 | ice_set_link(vsi, false); |
6d599946 TN |
7134 | } |
7135 | ||
b6f934f0 | 7136 | err = ice_vsi_open(vsi); |
cdedef59 AV |
7137 | if (err) |
7138 | netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", | |
7139 | vsi->vsi_num, vsi->vsw->sw_id); | |
a4e82a81 TN |
7140 | |
7141 | /* Update existing tunnels information */ | |
7142 | udp_tunnel_get_rx_info(netdev); | |
7143 | ||
cdedef59 AV |
7144 | return err; |
7145 | } | |
7146 | ||
7147 | /** | |
7148 | * ice_stop - Disables a network interface | |
7149 | * @netdev: network interface device structure | |
7150 | * | |
7151 | * The stop entry point is called when an interface is de-activated by the OS, | |
df17b7e0 | 7152 | * and the netdevice enters the DOWN state. The hardware is still under the |
cdedef59 AV |
7153 | * driver's control, but the netdev interface is disabled. |
7154 | * | |
7155 | * Returns success only - not allowed to fail | |
7156 | */ | |
0e674aeb | 7157 | int ice_stop(struct net_device *netdev) |
cdedef59 AV |
7158 | { |
7159 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
7160 | struct ice_vsi *vsi = np->vsi; | |
e95fc857 KG |
7161 | struct ice_pf *pf = vsi->back; |
7162 | ||
7163 | if (ice_is_reset_in_progress(pf->state)) { | |
7164 | netdev_err(netdev, "can't stop net device while reset is in progress"); | |
7165 | return -EBUSY; | |
7166 | } | |
cdedef59 AV |
7167 | |
7168 | ice_vsi_close(vsi); | |
7169 | ||
7170 | return 0; | |
7171 | } | |
7172 | ||
e94d4478 AV |
7173 | /** |
7174 | * ice_features_check - Validate encapsulated packet conforms to limits | |
7175 | * @skb: skb buffer | |
7176 | * @netdev: This port's netdev | |
7177 | * @features: Offload features that the stack believes apply | |
7178 | */ | |
7179 | static netdev_features_t | |
7180 | ice_features_check(struct sk_buff *skb, | |
7181 | struct net_device __always_unused *netdev, | |
7182 | netdev_features_t features) | |
7183 | { | |
7184 | size_t len; | |
7185 | ||
7186 | /* No point in doing any of this if neither checksum nor GSO are | |
df17b7e0 | 7187 | * being requested for this frame. We can rule out both by just |
e94d4478 AV |
7188 | * checking for CHECKSUM_PARTIAL |
7189 | */ | |
7190 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
7191 | return features; | |
7192 | ||
7193 | /* We cannot support GSO if the MSS is going to be less than | |
df17b7e0 | 7194 | * 64 bytes. If it is then we need to drop support for GSO. |
e94d4478 AV |
7195 | */ |
7196 | if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) | |
7197 | features &= ~NETIF_F_GSO_MASK; | |
7198 | ||
7199 | len = skb_network_header(skb) - skb->data; | |
a4e82a81 | 7200 | if (len > ICE_TXD_MACLEN_MAX || len & 0x1) |
e94d4478 AV |
7201 | goto out_rm_features; |
7202 | ||
7203 | len = skb_transport_header(skb) - skb_network_header(skb); | |
a4e82a81 | 7204 | if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
e94d4478 AV |
7205 | goto out_rm_features; |
7206 | ||
7207 | if (skb->encapsulation) { | |
7208 | len = skb_inner_network_header(skb) - skb_transport_header(skb); | |
a4e82a81 | 7209 | if (len > ICE_TXD_L4LEN_MAX || len & 0x1) |
e94d4478 AV |
7210 | goto out_rm_features; |
7211 | ||
7212 | len = skb_inner_transport_header(skb) - | |
7213 | skb_inner_network_header(skb); | |
a4e82a81 | 7214 | if (len > ICE_TXD_IPLEN_MAX || len & 0x1) |
e94d4478 AV |
7215 | goto out_rm_features; |
7216 | } | |
7217 | ||
7218 | return features; | |
7219 | out_rm_features: | |
7220 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); | |
7221 | } | |
7222 | ||
462acf6a TN |
7223 | static const struct net_device_ops ice_netdev_safe_mode_ops = { |
7224 | .ndo_open = ice_open, | |
7225 | .ndo_stop = ice_stop, | |
7226 | .ndo_start_xmit = ice_start_xmit, | |
7227 | .ndo_set_mac_address = ice_set_mac_address, | |
7228 | .ndo_validate_addr = eth_validate_addr, | |
7229 | .ndo_change_mtu = ice_change_mtu, | |
7230 | .ndo_get_stats64 = ice_get_stats64, | |
7231 | .ndo_tx_timeout = ice_tx_timeout, | |
ebc5399e | 7232 | .ndo_bpf = ice_xdp_safe_mode, |
462acf6a TN |
7233 | }; |
7234 | ||
cdedef59 AV |
7235 | static const struct net_device_ops ice_netdev_ops = { |
7236 | .ndo_open = ice_open, | |
7237 | .ndo_stop = ice_stop, | |
2b245cb2 | 7238 | .ndo_start_xmit = ice_start_xmit, |
2a87bd73 | 7239 | .ndo_select_queue = ice_select_queue, |
e94d4478 AV |
7240 | .ndo_features_check = ice_features_check, |
7241 | .ndo_set_rx_mode = ice_set_rx_mode, | |
7242 | .ndo_set_mac_address = ice_set_mac_address, | |
7243 | .ndo_validate_addr = eth_validate_addr, | |
7244 | .ndo_change_mtu = ice_change_mtu, | |
fcea6f3d | 7245 | .ndo_get_stats64 = ice_get_stats64, |
1ddef455 | 7246 | .ndo_set_tx_maxrate = ice_set_tx_maxrate, |
a7605370 | 7247 | .ndo_eth_ioctl = ice_eth_ioctl, |
7c710869 AV |
7248 | .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, |
7249 | .ndo_set_vf_mac = ice_set_vf_mac, | |
7250 | .ndo_get_vf_config = ice_get_vf_cfg, | |
7251 | .ndo_set_vf_trust = ice_set_vf_trust, | |
7252 | .ndo_set_vf_vlan = ice_set_vf_port_vlan, | |
7253 | .ndo_set_vf_link_state = ice_set_vf_link_state, | |
730fdea4 | 7254 | .ndo_get_vf_stats = ice_get_vf_stats, |
d76a60ba AV |
7255 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
7256 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, | |
7257 | .ndo_set_features = ice_set_features, | |
b1edc14a MFIP |
7258 | .ndo_bridge_getlink = ice_bridge_getlink, |
7259 | .ndo_bridge_setlink = ice_bridge_setlink, | |
e94d4478 AV |
7260 | .ndo_fdb_add = ice_fdb_add, |
7261 | .ndo_fdb_del = ice_fdb_del, | |
28bf2672 BC |
7262 | #ifdef CONFIG_RFS_ACCEL |
7263 | .ndo_rx_flow_steer = ice_rx_flow_steer, | |
7264 | #endif | |
b3969fd7 | 7265 | .ndo_tx_timeout = ice_tx_timeout, |
efc2214b MF |
7266 | .ndo_bpf = ice_xdp, |
7267 | .ndo_xdp_xmit = ice_xdp_xmit, | |
2d4238f5 | 7268 | .ndo_xsk_wakeup = ice_xsk_wakeup, |
cdedef59 | 7269 | }; |