// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

#define DRV_VERSION "0.7.5-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;

static void ice_rebuild(struct ice_pf *pf);

static void ice_vsi_release_all(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

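/* Worked example of the wraparound arithmetic above: with ring->count = 512,
 * head (next_to_clean) = 500 and tail (next_to_use) = 10, the tail has
 * wrapped past the end of the ring, so the pending count is
 * 10 + 512 - 500 = 22 descriptors.
 */
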
/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If the packet counter has not changed, the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

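/* Note on the hang check above: stats.pkts is masked with INT_MAX so the
 * snapshot stored in prev_pkt stays non-negative, leaving -1 free to act as
 * the sentinel for "no Tx work was pending on the previous pass"; that keeps
 * an idle queue from being mistaken for a stalled one.
 */
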
/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;

	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
	if (!vsi)
		return -EINVAL;

	/* To add a MAC filter, first add the MAC to a list and then
	 * pass the list to ice_add_mac.
	 */

	/* Add a unicast MAC filter so the VSI can get its packets */
	status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
	if (status)
		goto unregister;

	/* VSI needs to receive broadcast traffic, so add the broadcast
	 * MAC address to the list as well.
	 */
	eth_broadcast_addr(broadcast);
	status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
	if (status)
		goto unregister;

	return 0;
unregister:
	/* We aren't useful with no MAC filters, so unregister if we
	 * had an error
	 */
	if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
		dev_err(&pf->pdev->dev,
			"Could not add MAC filters error %d. Unregistering device\n",
			status);
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	return -EIO;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->vlan_ena) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = &vsi->back->pdev->dev;
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
	ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_add_mac(hw, &vsi->tmp_sync_list);
	ice_free_fltr_list(dev, &vsi->tmp_sync_list);
	/* If the filter was added successfully, or already exists, don't
	 * report it as an error; continue processing the rest of the
	 * function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev,
				    "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
						  ICE_FLTR_RX);
			if (status) {
				netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_PROMISC;
				err = -EIO;
				goto out_promisc;
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

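/* Unwind logic for ice_vsi_sync_fltr() above: the "out" label re-arms the
 * unicast/multicast FLTR_CHANGED bits so ice_sync_fltr_subtask() retries the
 * whole sync on a later pass, while "out_promisc" re-arms only
 * PROMISC_CHANGED so just the promiscuous configuration is retried.
 */
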
/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_stop(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		} else {
			ice_vsi_close(vsi);
		}
	}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int i;

	/* already prepared for reset */
	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	for (i = 0; i < pf->num_alloc_vfs; i++)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
	WARN_ON(in_interrupt());

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared for it as indicated by __ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or software on other PFs, that bit is
	 * not set so prepare for the reset now), poll for reset done, rebuild
	 * and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

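/* Summary of the pf->state bits driving the reset flow above:
 * __ICE_RESET_OICR_RECV     - OICR handler saw a CORER/GLOBR/EMPR warning
 * __ICE_CORER/GLOBR_RECV    - which reset type the OICR handler recorded
 * __ICE_PFR/CORER/GLOBR_REQ - a software-requested reset of that type
 * __ICE_PREPARED_FOR_RESET  - ice_prepare_for_reset() has already run
 */
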
/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
		/* fall through */
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_SW_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	devm_kfree(&vsi->back->pdev->dev, caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Flow Control: %s\n",
		    speed, fec_req, fec, fc);
}

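/* Example of the resulting log line, assuming a 25G link that negotiated
 * RS-FEC and full flow control:
 * "NIC Link is up 25 Gbps, Requested FEC: RS-FEC, FEC: RS-FEC, Flow Control: Rx/Tx"
 */
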
/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int result;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure since other bookkeeping is still needed
	 */
	result = ice_update_link_info(pi);
	if (result)
		dev_dbg(&pf->pdev->dev,
			"Failed to update link status and re-enable link events for port %d\n",
			pi->lport);

	/* if the old link up/down state and speed are the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return result;

	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		result = ice_aq_set_link_restart_an(pi, false, NULL);
		if (result) {
			dev_dbg(&pf->pdev->dev,
				"Failed to set link down, VSI %d error %d\n",
				vsi->vsi_num, result);
			return result;
		}
	}

	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	if (pf->num_alloc_vfs)
		ice_vc_notify_link_state(pf);

	return result;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw),
			"Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

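/* Note on the mask computed above: bits set in the event mask suppress the
 * corresponding event type, so ~(UPDOWN | MEDIA_NA | MODULE_QUAL_FAIL)
 * leaves exactly those three link events enabled.
 */
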
/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(&pf->pdev->dev,
			"Could not process link event, error %d\n", status);

	return status;
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
			 q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue VF Error detected\n", qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue VF Error detected\n", qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(&pf->pdev->dev,
				"%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(&pf->pdev->dev,
				"%s Receive Queue event error %d\n", qtype,
				ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(&pf->pdev->dev,
					"Could not handle link event\n");
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(&pf->pdev->dev,
				"%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	devm_kfree(&pf->pdev->dev, event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
static void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 */
static void ice_service_task_stop(struct ice_pf *pf)
{
	set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g. WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	bool mdd_detected = false;
	u32 reg;
	int i;

	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_TX_TCLAN);
	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				GL_MDET_TX_TCLAN_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
				GL_MDET_TX_TCLAN_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
		mdd_detected = true;
	}

	reg = rd32(hw, GL_MDET_RX);
	if (reg & GL_MDET_RX_VALID_M) {
		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
				GL_MDET_RX_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
				GL_MDET_RX_VF_NUM_S;
		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
				GL_MDET_RX_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
				GL_MDET_RX_QNUM_S);

		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		bool pf_mdd_detected = false;

		reg = rd32(hw, PF_MDET_TX_PQM);
		if (reg & PF_MDET_TX_PQM_VALID_M) {
			wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_TX_TCLAN);
		if (reg & PF_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}

		reg = rd32(hw, PF_MDET_RX);
		if (reg & PF_MDET_RX_VALID_M) {
			wr32(hw, PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__ICE_NEEDS_RESTART, pf->state);
			ice_service_task_schedule(pf);
		}
	}

	/* check to see if one of the VFs caused the MDD */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		struct ice_vf *vf = &pf->vf[i];

		bool vf_mdd_detected = false;

		reg = rd32(hw, VP_MDET_TX_PQM(i));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			vf_mdd_detected = true;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf_mdd_detected) {
			vf->num_mdd_events++;
			if (vf->num_mdd_events &&
			    vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
				dev_info(&pf->pdev->dev,
					 "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
					 i, vf->num_mdd_events);
		}
	}
}

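/* The 0xffffffff/0xFFFF writes above acknowledge the latched GL_MDET_*,
 * PF_MDET_* and VP_MDET_* cause registers, so an event that was already
 * handled is not reported again on the next service-task pass.
 */
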
/**
 * ice_force_phys_link_state - Force the physical link state
 * @vsi: VSI to force the physical link state to up/down
 * @link_up: true/false indicates to set the physical link to up/down
 *
 * Force the physical link state by getting the current PHY capabilities from
 * hardware and setting the PHY config based on the determined capabilities. If
 * the link changes, a link event will be triggered because both the Enable
 * Automatic Link Update and LESM Enable bits are set when setting the PHY
 * capabilities.
 *
 * Returns 0 on success, negative on failure
 */
static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data *cfg;
	struct ice_port_info *pi;
	struct device *dev;
	int retcode;

	if (!vsi || !vsi->port_info || !vsi->back)
		return -EINVAL;
	if (vsi->type != ICE_VSI_PF)
		return 0;

	dev = &vsi->back->pdev->dev;

	pi = vsi->port_info;

	pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				      NULL);
	if (retcode) {
		dev_err(dev,
			"Failed to get phy capabilities, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
		goto out;
	}

	/* No change in link */
	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
		goto out;

	cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
	if (!cfg) {
		retcode = -ENOMEM;
		goto out;
	}

	cfg->phy_type_low = pcaps->phy_type_low;
	cfg->phy_type_high = pcaps->phy_type_high;
	cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	cfg->low_power_ctrl = pcaps->low_power_ctrl;
	cfg->eee_cap = pcaps->eee_cap;
	cfg->eeer_value = pcaps->eeer_value;
	cfg->link_fec_opt = pcaps->link_fec_options;
	if (link_up)
		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
	else
		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
	if (retcode) {
		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
			vsi->vsi_num, retcode);
		retcode = -EIO;
	}

	devm_kfree(dev, cfg);
out:
	devm_kfree(dev, pcaps);
	return retcode;
}

/**
 * ice_check_media_subtask - Check for media; bring link up if detected.
 * @pf: pointer to PF struct
 */
static void ice_check_media_subtask(struct ice_pf *pf)
{
	struct ice_port_info *pi;
	struct ice_vsi *vsi;
	int err;

	vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
	if (!vsi)
		return;

	/* No need to check for media if it's already present or the interface
	 * is down
	 */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
	    test_bit(__ICE_DOWN, vsi->state))
		return;

	/* Refresh link info and check if media is present */
	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err)
		return;

	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		err = ice_force_phys_link_state(vsi, true);
		if (err)
			return;
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		/* A Link Status Event will be generated; the event handler
		 * will complete bringing the interface up
		 */
	}
}

/**
 * ice_service_task - manage and run subtasks
 * @work: pointer to work_struct contained by the PF struct
 */
static void ice_service_task(struct work_struct *work)
{
	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
	unsigned long start_time = jiffies;

	/* subtasks */

	/* process reset requests first */
	ice_reset_subtask(pf);

	/* bail if a reset/recovery cycle is pending or rebuild failed */
	if (ice_is_reset_in_progress(pf->state) ||
	    test_bit(__ICE_SUSPENDED, pf->state) ||
	    test_bit(__ICE_NEEDS_RESTART, pf->state)) {
		ice_service_task_complete(pf);
		return;
	}

	ice_check_media_subtask(pf);
	ice_check_for_hang_subtask(pf);
	ice_sync_fltr_subtask(pf);
	ice_handle_mdd_event(pf);
	ice_process_vflr_event(pf);
	ice_watchdog_subtask(pf);
	ice_clean_adminq_subtask(pf);
	ice_clean_mailboxq_subtask(pf);

	/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
	ice_service_task_complete(pf);

	/* If the tasks have taken longer than one service timer period
	 * or there is more work to be done, reset the service timer to
	 * schedule the service task now.
	 */
	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
	    test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
	    test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		mod_timer(&pf->serv_tmr, jiffies);
}

/**
 * ice_set_ctrlq_len - helper function to set controlq length
 * @hw: pointer to the HW instance
 */
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
	hw->adminq.num_rq_entries = ICE_AQ_LEN;
	hw->adminq.num_sq_entries = ICE_AQ_LEN;
	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
	hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}

/**
 * ice_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 */
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
			const cpumask_t *mask)
{
	struct ice_q_vector *q_vector =
		container_of(notify, struct ice_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * ice_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 */
static void ice_irq_affinity_release(struct kref __always_unused *ref) {}

/**
 * ice_vsi_ena_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 */
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	int i;

	ice_for_each_q_vector(vsi, i)
		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);

	ice_flush(hw);
	return 0;
}

/**
 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
 * @vsi: the VSI being configured
 * @basename: name for the vector
 */
static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = devm_request_irq(&pf->pdev->dev, irq_num,
				       vsi->irq_handler, 0,
				       q_vector->name, q_vector);
		if (err) {
			netdev_err(vsi->netdev,
				   "MSIX request_irq failed, error: %d\n", err);
			goto free_q_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = ice_irq_affinity_notify;
		q_vector->affinity_notify.release = ice_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);

		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_q_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

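/* The names built above show up in /proc/interrupts; for example, with a
 * basename of "ice-eth0" a combined Tx/Rx queue pair vector would be
 * registered as "ice-eth0-TxRx-0" (the basename is supplied by the caller).
 * Note also that the error path was made to free the IRQ with the same
 * cookie (vsi->q_vectors[vector]) that was passed to devm_request_irq().
 */
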
/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
 */
static void ice_ena_misc_vector(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
	rd32(hw, PFINT_OICR);		/* read to clear */

	val = (PFINT_OICR_ECC_ERR_M |
	       PFINT_OICR_MAL_DETECT_M |
	       PFINT_OICR_GRST_M |
	       PFINT_OICR_PCI_EXCEPTION_M |
	       PFINT_OICR_VFLR_M |
	       PFINT_OICR_HMC_ERR_M |
	       PFINT_OICR_PE_CRITERR_M);

	wr32(hw, PFINT_OICR_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
}

/**
 * ice_misc_intr - misc interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
{
	struct ice_pf *pf = (struct ice_pf *)data;
	struct ice_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 oicr, ena_mask;

	set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
	set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	oicr = rd32(hw, PFINT_OICR);
	ena_mask = rd32(hw, PFINT_OICR_ENA);

	if (oicr & PFINT_OICR_SWINT_M) {
		ena_mask &= ~PFINT_OICR_SWINT_M;
		pf->sw_int_count++;
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
		set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
	}
	if (oicr & PFINT_OICR_VFLR_M) {
		ena_mask &= ~PFINT_OICR_VFLR_M;
		set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		/* we have a reset warning */
		ena_mask &= ~PFINT_OICR_GRST_M;
		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			pf->corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			pf->globr_count++;
		else if (reset == ICE_RESET_EMPR)
			pf->empr_count++;
		else
			dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
				reset);

		/* If a reset cycle isn't already in progress, we set a bit in
		 * pf->state so that the service task can start a reset/rebuild.
		 * We also make note of which reset happened so that peer
		 * devices/drivers can be informed.
		 */
		if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
			if (reset == ICE_RESET_CORER)
				set_bit(__ICE_CORER_RECV, pf->state);
			else if (reset == ICE_RESET_GLOBR)
				set_bit(__ICE_GLOBR_RECV, pf->state);
			else
				set_bit(__ICE_EMPR_RECV, pf->state);

			/* There are a couple of different bits at play here.
			 * hw->reset_ongoing indicates whether the hardware is
			 * in reset. This is set to true when a reset interrupt
			 * is received and set back to false after the driver
			 * has determined that the hardware is out of reset.
			 *
			 * __ICE_RESET_OICR_RECV in pf->state indicates
			 * that a post reset rebuild is required before the
			 * driver is operational again. This is set above.
			 *
			 * As this is the start of the reset/rebuild cycle, set
			 * both to indicate that.
			 */
			hw->reset_ongoing = true;
		}
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		ena_mask &= ~PFINT_OICR_HMC_ERR_M;
		dev_dbg(&pf->pdev->dev,
			"HMC Error interrupt - info 0x%x, data 0x%x\n",
			rd32(hw, PFHMC_ERRORINFO),
			rd32(hw, PFHMC_ERRORDATA));
	}

	/* Report any remaining unexpected interrupts */
	oicr &= ena_mask;
	if (oicr) {
		dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
			oicr);
		/* If a critical error is pending there is no choice but to
		 * reset the device.
		 */
		if (oicr & (PFINT_OICR_PE_CRITERR_M |
			    PFINT_OICR_PCI_EXCEPTION_M |
			    PFINT_OICR_ECC_ERR_M)) {
			set_bit(__ICE_PFR_REQ, pf->state);
			ice_service_task_schedule(pf);
		}
	}
	ret = IRQ_HANDLED;

	if (!test_bit(__ICE_DOWN, pf->state)) {
		ice_service_task_schedule(pf);
		ice_irq_dynamic_ena(hw, NULL, NULL);
	}

	return ret;
}

0e04e8e1
BC
1758/**
1759 * ice_dis_ctrlq_interrupts - disable control queue interrupts
1760 * @hw: pointer to HW structure
1761 */
1762static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
1763{
1764 /* disable Admin queue Interrupt causes */
1765 wr32(hw, PFINT_FW_CTL,
1766 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
1767
1768 /* disable Mailbox queue Interrupt causes */
1769 wr32(hw, PFINT_MBX_CTL,
1770 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
1771
1772 /* disable Control queue Interrupt causes */
1773 wr32(hw, PFINT_OICR_CTL,
1774 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
1775
1776 ice_flush(hw);
1777}
1778
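/* A minimal sketch of the read-modify-write pattern repeated above,
 * expressed as one helper. Only the driver's wr32()/rd32() accessors are
 * assumed; ice_reg_clear_bits() is hypothetical, not an upstream function.
 */
static inline void ice_reg_clear_bits(struct ice_hw *hw, u32 reg, u32 mask)
{
	wr32(hw, reg, rd32(hw, reg) & ~mask);
}

/* usage: ice_reg_clear_bits(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M); */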
940b61af
AV
1779/**
1780 * ice_free_irq_msix_misc - Unroll misc vector setup
1781 * @pf: board private structure
1782 */
1783static void ice_free_irq_msix_misc(struct ice_pf *pf)
1784{
0e04e8e1
BC
1785 struct ice_hw *hw = &pf->hw;
1786
1787 ice_dis_ctrlq_interrupts(hw);
1788
940b61af 1789 /* disable OICR interrupt */
0e04e8e1
BC
1790 wr32(hw, PFINT_OICR_ENA, 0);
1791 ice_flush(hw);
940b61af 1792
ba880734 1793 if (pf->msix_entries) {
cbe66bfe 1794 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
940b61af 1795 devm_free_irq(&pf->pdev->dev,
cbe66bfe 1796 pf->msix_entries[pf->oicr_idx].vector, pf);
940b61af
AV
1797 }
1798
eb0208ec 1799 pf->num_avail_sw_msix += 1;
cbe66bfe 1800 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
940b61af
AV
1801}
1802
0e04e8e1
BC
1803/**
1804 * ice_ena_ctrlq_interrupts - enable control queue interrupts
1805 * @hw: pointer to HW structure
b07833a0 1806 * @reg_idx: HW vector index to associate the control queue interrupts with
0e04e8e1 1807 */
b07833a0 1808static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
0e04e8e1
BC
1809{
1810 u32 val;
1811
b07833a0 1812 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
0e04e8e1
BC
1813 PFINT_OICR_CTL_CAUSE_ENA_M);
1814 wr32(hw, PFINT_OICR_CTL, val);
1815
1816 /* enable Admin queue Interrupt causes */
b07833a0 1817 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
0e04e8e1
BC
1818 PFINT_FW_CTL_CAUSE_ENA_M);
1819 wr32(hw, PFINT_FW_CTL, val);
1820
1821 /* enable Mailbox queue Interrupt causes */
b07833a0 1822 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
0e04e8e1
BC
1823 PFINT_MBX_CTL_CAUSE_ENA_M);
1824 wr32(hw, PFINT_MBX_CTL, val);
1825
1826 ice_flush(hw);
1827}
1828
940b61af
AV
1829/**
1830 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
1831 * @pf: board private structure
1832 *
1833 * This sets up the handler for MSIX 0, which is used to manage the
df17b7e0 1834 * non-queue interrupts, e.g. AdminQ and errors. This is not used
940b61af
AV
1835 * when in MSI or Legacy interrupt mode.
1836 */
1837static int ice_req_irq_msix_misc(struct ice_pf *pf)
1838{
1839 struct ice_hw *hw = &pf->hw;
1840 int oicr_idx, err = 0;
940b61af
AV
1841
1842 if (!pf->int_name[0])
1843 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
1844 dev_driver_string(&pf->pdev->dev),
1845 dev_name(&pf->pdev->dev));
1846
0b28b702
AV
1847 /* Do not request the IRQ, but do enable the OICR interrupt, since
1848 * settings are lost during reset. Note that this function is called
1849 * only during the rebuild path and not while a reset is in progress.
1850 */
5df7e45d 1851 if (ice_is_reset_in_progress(pf->state))
0b28b702
AV
1852 goto skip_req_irq;
1853
cbe66bfe
BC
1854 /* reserve one vector in irq_tracker for misc interrupts */
1855 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
940b61af
AV
1856 if (oicr_idx < 0)
1857 return oicr_idx;
1858
eb0208ec 1859 pf->num_avail_sw_msix -= 1;
cbe66bfe 1860 pf->oicr_idx = oicr_idx;
940b61af
AV
1861
1862 err = devm_request_irq(&pf->pdev->dev,
cbe66bfe 1863 pf->msix_entries[pf->oicr_idx].vector,
940b61af
AV
1864 ice_misc_intr, 0, pf->int_name, pf);
1865 if (err) {
1866 dev_err(&pf->pdev->dev,
1867 "devm_request_irq for %s failed: %d\n",
1868 pf->int_name, err);
cbe66bfe 1869 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
eb0208ec 1870 pf->num_avail_sw_msix += 1;
940b61af
AV
1871 return err;
1872 }
1873
0b28b702 1874skip_req_irq:
940b61af
AV
1875 ice_ena_misc_vector(pf);
1876
cbe66bfe
BC
1877 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
1878 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
63f545ed 1879 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
940b61af
AV
1880
1881 ice_flush(hw);
cdedef59 1882 ice_irq_dynamic_ena(hw, NULL, NULL);
940b61af
AV
1883
1884 return 0;
1885}
1886
3a858ba3 1887/**
df0f8479
AV
1888 * ice_napi_add - register NAPI handler for the VSI
1889 * @vsi: VSI for which NAPI handler is to be registered
3a858ba3 1890 *
df0f8479
AV
1891 * This function is only called in the driver's load path. Registering the NAPI
1892 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
1893 * reset/rebuild, etc.)
3a858ba3 1894 */
df0f8479 1895static void ice_napi_add(struct ice_vsi *vsi)
3a858ba3 1896{
df0f8479 1897 int v_idx;
3a858ba3 1898
df0f8479 1899 if (!vsi->netdev)
3a858ba3 1900 return;
3a858ba3 1901
0c2561c8 1902 ice_for_each_q_vector(vsi, v_idx)
df0f8479
AV
1903 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
1904 ice_napi_poll, NAPI_POLL_WEIGHT);
3a858ba3
AV
1905}
1906
1907/**
df0f8479
AV
1908 * ice_cfg_netdev - Allocate, configure and register a netdev
1909 * @vsi: the VSI associated with the new netdev
3a858ba3
AV
1910 *
1911 * Returns 0 on success, negative value on failure
1912 */
1913static int ice_cfg_netdev(struct ice_vsi *vsi)
1914{
d76a60ba
AV
1915 netdev_features_t csumo_features;
1916 netdev_features_t vlano_features;
1917 netdev_features_t dflt_features;
1918 netdev_features_t tso_features;
3a858ba3
AV
1919 struct ice_netdev_priv *np;
1920 struct net_device *netdev;
1921 u8 mac_addr[ETH_ALEN];
df0f8479 1922 int err;
3a858ba3 1923
c6dfd690
BA
1924 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
1925 vsi->alloc_rxq);
3a858ba3
AV
1926 if (!netdev)
1927 return -ENOMEM;
1928
1929 vsi->netdev = netdev;
1930 np = netdev_priv(netdev);
1931 np->vsi = vsi;
1932
d76a60ba
AV
1933 dflt_features = NETIF_F_SG |
1934 NETIF_F_HIGHDMA |
1935 NETIF_F_RXHASH;
1936
1937 csumo_features = NETIF_F_RXCSUM |
1938 NETIF_F_IP_CSUM |
cf909e19 1939 NETIF_F_SCTP_CRC |
d76a60ba
AV
1940 NETIF_F_IPV6_CSUM;
1941
1942 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
1943 NETIF_F_HW_VLAN_CTAG_TX |
1944 NETIF_F_HW_VLAN_CTAG_RX;
1945
1946 tso_features = NETIF_F_TSO;
1947
3a858ba3 1948 /* set features that user can change */
d76a60ba
AV
1949 netdev->hw_features = dflt_features | csumo_features |
1950 vlano_features | tso_features;
3a858ba3
AV
1951
1952 /* enable features */
1953 netdev->features |= netdev->hw_features;
d76a60ba
AV
1954 /* encap and VLAN devices inherit default, csumo and tso features */
1955 netdev->hw_enc_features |= dflt_features | csumo_features |
1956 tso_features;
1957 netdev->vlan_features |= dflt_features | csumo_features |
1958 tso_features;
3a858ba3
AV
1959
1960 if (vsi->type == ICE_VSI_PF) {
1961 SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
1962 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
1963
1964 ether_addr_copy(netdev->dev_addr, mac_addr);
1965 ether_addr_copy(netdev->perm_addr, mac_addr);
1966 }
1967
1968 netdev->priv_flags |= IFF_UNICAST_FLT;
1969
cdedef59
AV
1970 /* assign netdev_ops */
1971 netdev->netdev_ops = &ice_netdev_ops;
1972
3a858ba3
AV
1973 /* set up watchdog timeout value to be 5 seconds */
1974 netdev->watchdog_timeo = 5 * HZ;
1975
fcea6f3d
AV
1976 ice_set_ethtool_ops(netdev);
1977
3a858ba3
AV
1978 netdev->min_mtu = ETH_MIN_MTU;
1979 netdev->max_mtu = ICE_MAX_MTU;
1980
df0f8479
AV
1981 err = register_netdev(vsi->netdev);
1982 if (err)
1983 return err;
3a858ba3 1984
df0f8479 1985 netif_carrier_off(vsi->netdev);
3a858ba3 1986
df0f8479
AV
1987 /* make sure transmit queues start off as stopped */
1988 netif_tx_stop_all_queues(vsi->netdev);
3a858ba3
AV
1989
1990 return 0;
1991}
1992
d76a60ba
AV
1993/**
1994 * ice_fill_rss_lut - Fill the RSS lookup table with default values
1995 * @lut: Lookup table
1996 * @rss_table_size: Lookup table size
1997 * @rss_size: Range of queue numbers for hashing
1998 */
1999void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
2000{
2001 u16 i;
2002
2003 for (i = 0; i < rss_table_size; i++)
2004 lut[i] = i % rss_size;
2005}
2006
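/* Worked example (hypothetical sizes, not driver code): filling an 8-entry
 * lookup table that hashes over 3 queues with ice_fill_rss_lut() above.
 */
static void ice_example_fill_lut(void)
{
	u8 lut[8];

	ice_fill_rss_lut(lut, 8, 3);
	/* lut now holds { 0, 1, 2, 0, 1, 2, 0, 1 }: queues are assigned
	 * round-robin across the table.
	 */
}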
0f9d5027
AV
2007/**
2008 * ice_pf_vsi_setup - Set up a PF VSI
2009 * @pf: board private structure
2010 * @pi: pointer to the port_info instance
2011 *
0e674aeb
AV
2012 * Returns pointer to the successfully allocated VSI software struct
2013 * on success, otherwise returns NULL on failure.
0f9d5027
AV
2014 */
2015static struct ice_vsi *
2016ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
2017{
2018 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
2019}
2020
0e674aeb
AV
2021/**
2022 * ice_lb_vsi_setup - Set up a loopback VSI
2023 * @pf: board private structure
2024 * @pi: pointer to the port_info instance
2025 *
2026 * Returns pointer to the successfully allocated VSI software struct
2027 * on success, otherwise returns NULL on failure.
2028 */
2029struct ice_vsi *
2030ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
2031{
2032 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
2033}
2034
d76a60ba 2035/**
f9867df6 2036 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
d76a60ba
AV
2037 * @netdev: network interface to be adjusted
2038 * @proto: unused protocol
f9867df6 2039 * @vid: VLAN ID to be added
d76a60ba 2040 *
f9867df6 2041 * net_device_ops implementation for adding VLAN IDs
d76a60ba 2042 */
c8b7abdd
BA
2043static int
2044ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
2045 u16 vid)
d76a60ba
AV
2046{
2047 struct ice_netdev_priv *np = netdev_priv(netdev);
2048 struct ice_vsi *vsi = np->vsi;
5eda8afd 2049 int ret;
d76a60ba
AV
2050
2051 if (vid >= VLAN_N_VID) {
2052 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
2053 vid, VLAN_N_VID);
2054 return -EINVAL;
2055 }
2056
2057 if (vsi->info.pvid)
2058 return -EINVAL;
2059
4f74dcc1
BC
2060 /* Enable VLAN pruning when VLAN 0 is added */
2061 if (unlikely(!vid)) {
5eda8afd 2062 ret = ice_cfg_vlan_pruning(vsi, true, false);
4f74dcc1
BC
2063 if (ret)
2064 return ret;
2065 }
2066
f9867df6 2067 /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
d76a60ba
AV
2068 * needed to continue allowing all untagged packets since VLAN prune
2069 * list is applied to all packets by the switch
2070 */
5eda8afd
AA
2071 ret = ice_vsi_add_vlan(vsi, vid);
2072 if (!ret) {
2073 vsi->vlan_ena = true;
2074 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
2075 }
2076
2077 return ret;
d76a60ba
AV
2078}
2079
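/* Usage note: this ndo is invoked by the 8021q layer when a VLAN device is
 * created on top of the PF netdev, for example (interface names are
 * hypothetical):
 *
 *   ip link add link eth0 name eth0.100 type vlan id 100
 *
 * which results in a call equivalent to
 * ice_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), 100).
 */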
d76a60ba 2080/**
f9867df6 2081 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
d76a60ba
AV
2082 * @netdev: network interface to be adjusted
2083 * @proto: unused protocol
f9867df6 2084 * @vid: VLAN ID to be removed
d76a60ba 2085 *
f9867df6 2086 * net_device_ops implementation for removing VLAN IDs
d76a60ba 2087 */
c8b7abdd
BA
2088static int
2089ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
2090 u16 vid)
d76a60ba
AV
2091{
2092 struct ice_netdev_priv *np = netdev_priv(netdev);
2093 struct ice_vsi *vsi = np->vsi;
5eda8afd 2094 int ret;
d76a60ba
AV
2095
2096 if (vsi->info.pvid)
2097 return -EINVAL;
2098
4f74dcc1
BC
2099 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
2100 * information
d76a60ba 2101 */
5eda8afd
AA
2102 ret = ice_vsi_kill_vlan(vsi, vid);
2103 if (ret)
2104 return ret;
d76a60ba 2105
4f74dcc1
BC
2106 /* Disable VLAN pruning when VLAN 0 is removed */
2107 if (unlikely(!vid))
5eda8afd 2108 ret = ice_cfg_vlan_pruning(vsi, false, false);
4f74dcc1 2109
5eda8afd
AA
2110 vsi->vlan_ena = false;
2111 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
2112 return ret;
d76a60ba
AV
2113}
2114
3a858ba3
AV
2115/**
2116 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
2117 * @pf: board private structure
2118 *
2119 * Returns 0 on success, negative value on failure
2120 */
2121static int ice_setup_pf_sw(struct ice_pf *pf)
2122{
2123 struct ice_vsi *vsi;
2124 int status = 0;
2125
5df7e45d 2126 if (ice_is_reset_in_progress(pf->state))
0f9d5027
AV
2127 return -EBUSY;
2128
2129 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
2130 if (!vsi) {
2131 status = -ENOMEM;
2132 goto unroll_vsi_setup;
3a858ba3
AV
2133 }
2134
df0f8479
AV
2135 status = ice_cfg_netdev(vsi);
2136 if (status) {
2137 status = -ENODEV;
2138 goto unroll_vsi_setup;
2139 }
2140
2141 /* registering the NAPI handler requires both the queues and
2142 * netdev to be created, which are done in ice_pf_vsi_setup()
2143 * and ice_cfg_netdev() respectively
2144 */
2145 ice_napi_add(vsi);
2146
561f4379 2147 status = ice_init_mac_fltr(pf);
9daf8208 2148 if (status)
df0f8479 2149 goto unroll_napi_add;
9daf8208 2150
9daf8208
AV
2151 return status;
2152
df0f8479 2153unroll_napi_add:
3a858ba3 2154 if (vsi) {
df0f8479 2155 ice_napi_del(vsi);
3a858ba3 2156 if (vsi->netdev) {
df0f8479
AV
2157 if (vsi->netdev->reg_state == NETREG_REGISTERED)
2158 unregister_netdev(vsi->netdev);
3a858ba3
AV
2159 free_netdev(vsi->netdev);
2160 vsi->netdev = NULL;
2161 }
df0f8479 2162 }
9daf8208 2163
df0f8479
AV
2164unroll_vsi_setup:
2165 if (vsi) {
2166 ice_vsi_free_q_vectors(vsi);
3a858ba3
AV
2167 ice_vsi_delete(vsi);
2168 ice_vsi_put_qs(vsi);
2169 pf->q_left_tx += vsi->alloc_txq;
2170 pf->q_left_rx += vsi->alloc_rxq;
2171 ice_vsi_clear(vsi);
2172 }
2173 return status;
2174}
2175
940b61af
AV
2176/**
2177 * ice_determine_q_usage - Calculate queue distribution
2178 * @pf: board private structure
2179 *
2180 * Determine how many Tx/Rx queues the PF should use, based on device capabilities and the number of online CPUs.
2181 */
2182static void ice_determine_q_usage(struct ice_pf *pf)
2183{
2184 u16 q_left_tx, q_left_rx;
2185
2186 q_left_tx = pf->hw.func_caps.common_cap.num_txq;
2187 q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
2188
5513b920 2189 pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
d76a60ba 2190
d337f2af 2191 /* only 1 Rx queue unless RSS is enabled */
d76a60ba
AV
2192 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2193 pf->num_lan_rx = 1;
2194 else
2195 pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
940b61af
AV
2196
2197 pf->q_left_tx = q_left_tx - pf->num_lan_tx;
2198 pf->q_left_rx = q_left_rx - pf->num_lan_rx;
2199}
2200
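/* Worked example (hypothetical capabilities): with 128 device Tx/Rx queues
 * and 16 online CPUs, and RSS enabled, ice_determine_q_usage() above picks
 * num_lan_tx = num_lan_rx = 16 and leaves q_left_tx = q_left_rx = 112 for
 * other uses.
 */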
2201/**
2202 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
2203 * @pf: board private structure to de-initialize
2204 */
2205static void ice_deinit_pf(struct ice_pf *pf)
2206{
8d81fa55 2207 ice_service_task_stop(pf);
940b61af
AV
2208 mutex_destroy(&pf->sw_mutex);
2209 mutex_destroy(&pf->avail_q_mutex);
78b5713a
AV
2210
2211 if (pf->avail_txqs) {
2212 bitmap_free(pf->avail_txqs);
2213 pf->avail_txqs = NULL;
2214 }
2215
2216 if (pf->avail_rxqs) {
2217 bitmap_free(pf->avail_rxqs);
2218 pf->avail_rxqs = NULL;
2219 }
940b61af
AV
2220}
2221
2222/**
2223 * ice_init_pf - Initialize general software structures (struct ice_pf)
2224 * @pf: board private structure to initialize
2225 */
78b5713a 2226static int ice_init_pf(struct ice_pf *pf)
940b61af
AV
2227{
2228 bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
75d2b253
AV
2229#ifdef CONFIG_PCI_IOV
2230 if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
2231 struct ice_hw *hw = &pf->hw;
2232
2233 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
2234 pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
2235 ICE_MAX_VF_COUNT);
2236 }
2237#endif /* CONFIG_PCI_IOV */
940b61af
AV
2238
2239 mutex_init(&pf->sw_mutex);
2240 mutex_init(&pf->avail_q_mutex);
2241
d76a60ba
AV
2242 if (pf->hw.func_caps.common_cap.rss_table_size)
2243 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
2244
940b61af
AV
2245 /* setup service timer and periodic service task */
2246 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
2247 pf->serv_tmr_period = HZ;
2248 INIT_WORK(&pf->serv_task, ice_service_task);
2249 clear_bit(__ICE_SERVICE_SCHED, pf->state);
78b5713a
AV
2250
2251 pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq;
2252 pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq;
2253
2254 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
2255 if (!pf->avail_txqs)
2256 return -ENOMEM;
2257
2258 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
2259 if (!pf->avail_rxqs) {
2260 devm_kfree(&pf->pdev->dev, pf->avail_txqs);
2261 pf->avail_txqs = NULL;
2262 return -ENOMEM;
2263 }
2264
2265 return 0;
940b61af
AV
2266}
2267
2268/**
2269 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
2270 * @pf: board private structure
2271 *
2272 * Compute the number of MSI-X vectors required (v_budget) and request that
2273 * many from the OS. Return the number of vectors reserved or negative on failure
2274 */
2275static int ice_ena_msix_range(struct ice_pf *pf)
2276{
2277 int v_left, v_actual, v_budget = 0;
2278 int needed, err, i;
2279
2280 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
2281
2282 /* reserve one vector for miscellaneous handler */
2283 needed = 1;
152b978a
AV
2284 if (v_left < needed)
2285 goto no_hw_vecs_left_err;
940b61af
AV
2286 v_budget += needed;
2287 v_left -= needed;
2288
2289 /* reserve vectors for LAN traffic */
152b978a
AV
2290 needed = min_t(int, num_online_cpus(), v_left);
2291 if (v_left < needed)
2292 goto no_hw_vecs_left_err;
2293 pf->num_lan_msix = needed;
2294 v_budget += needed;
2295 v_left -= needed;
940b61af
AV
2296
2297 pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
c6dfd690 2298 sizeof(*pf->msix_entries), GFP_KERNEL);
940b61af
AV
2299
2300 if (!pf->msix_entries) {
2301 err = -ENOMEM;
2302 goto exit_err;
2303 }
2304
2305 for (i = 0; i < v_budget; i++)
2306 pf->msix_entries[i].entry = i;
2307
2308 /* actually reserve the vectors */
2309 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
2310 ICE_MIN_MSIX, v_budget);
2311
2312 if (v_actual < 0) {
2313 dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
2314 err = v_actual;
2315 goto msix_err;
2316 }
2317
2318 if (v_actual < v_budget) {
2319 dev_warn(&pf->pdev->dev,
152b978a 2320 "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
940b61af 2321 v_budget, v_actual);
152b978a
AV
2322/* 2 vectors for LAN (traffic + OICR) */
2323#define ICE_MIN_LAN_VECS 2
2324
2325 if (v_actual < ICE_MIN_LAN_VECS) {
2326 /* error if we can't get minimum vectors */
940b61af
AV
2327 pci_disable_msix(pf->pdev);
2328 err = -ERANGE;
2329 goto msix_err;
152b978a
AV
2330 } else {
2331 pf->num_lan_msix = ICE_MIN_LAN_VECS;
940b61af
AV
2332 }
2333 }
2334
2335 return v_actual;
2336
2337msix_err:
2338 devm_kfree(&pf->pdev->dev, pf->msix_entries);
2339 goto exit_err;
2340
152b978a
AV
2341no_hw_vecs_left_err:
2342 dev_err(&pf->pdev->dev,
2343 "not enough device MSI-X vectors. requested = %d, available = %d\n",
2344 needed, v_left);
2345 err = -ERANGE;
940b61af
AV
2346exit_err:
2347 pf->num_lan_msix = 0;
940b61af
AV
2348 return err;
2349}
2350
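/* Worked example (hypothetical counts): with 16 online CPUs and enough
 * device vectors available, ice_ena_msix_range() above budgets 1 vector
 * for the misc/OICR handler plus 16 for LAN traffic, so v_budget = 17.
 * If the OS grants fewer than 17 but at least ICE_MIN_LAN_VECS, the
 * function falls back to pf->num_lan_msix = ICE_MIN_LAN_VECS instead of
 * failing.
 */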
2351/**
2352 * ice_dis_msix - Disable MSI-X interrupt setup in OS
2353 * @pf: board private structure
2354 */
2355static void ice_dis_msix(struct ice_pf *pf)
2356{
2357 pci_disable_msix(pf->pdev);
2358 devm_kfree(&pf->pdev->dev, pf->msix_entries);
2359 pf->msix_entries = NULL;
940b61af
AV
2360}
2361
eb0208ec
PB
2362/**
2363 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
2364 * @pf: board private structure
2365 */
2366static void ice_clear_interrupt_scheme(struct ice_pf *pf)
2367{
ba880734 2368 ice_dis_msix(pf);
eb0208ec 2369
cbe66bfe
BC
2370 if (pf->irq_tracker) {
2371 devm_kfree(&pf->pdev->dev, pf->irq_tracker);
2372 pf->irq_tracker = NULL;
eb0208ec
PB
2373 }
2374}
2375
940b61af
AV
2376/**
2377 * ice_init_interrupt_scheme - Determine proper interrupt scheme
2378 * @pf: board private structure to initialize
2379 */
2380static int ice_init_interrupt_scheme(struct ice_pf *pf)
2381{
cbe66bfe 2382 int vectors;
940b61af 2383
ba880734 2384 vectors = ice_ena_msix_range(pf);
940b61af
AV
2385
2386 if (vectors < 0)
2387 return vectors;
2388
2389 /* set up vector assignment tracking */
cbe66bfe
BC
2390 pf->irq_tracker =
2391 devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) +
c6dfd690 2392 (sizeof(u16) * vectors), GFP_KERNEL);
cbe66bfe 2393 if (!pf->irq_tracker) {
940b61af
AV
2394 ice_dis_msix(pf);
2395 return -ENOMEM;
2396 }
2397
eb0208ec
PB
2398 /* populate SW interrupts pool with number of OS granted IRQs. */
2399 pf->num_avail_sw_msix = vectors;
cbe66bfe
BC
2400 pf->irq_tracker->num_entries = vectors;
2401 pf->irq_tracker->end = pf->irq_tracker->num_entries;
eb0208ec
PB
2402
2403 return 0;
940b61af
AV
2404}
2405
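/* A sketch of the irq_tracker allocation above written with struct_size()
 * from linux/overflow.h, assuming the trailing flexible array member of
 * struct ice_res_tracker is named "list" (an assumption about its layout):
 *
 *	pf->irq_tracker = devm_kzalloc(&pf->pdev->dev,
 *				       struct_size(pf->irq_tracker, list,
 *						   vectors),
 *				       GFP_KERNEL);
 */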
c585ea42
BC
2406/**
2407 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
2408 * @pf: pointer to the PF structure
2409 *
2410 * There is no error returned here because the driver should be able to handle
2411 * 128 Byte cache lines, so we only print a warning in case issues are seen,
2412 * specifically with Tx.
2413 */
2414static void ice_verify_cacheline_size(struct ice_pf *pf)
2415{
2416 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
2417 dev_warn(&pf->pdev->dev,
2418 "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
2419 ICE_CACHE_LINE_BYTES);
2420}
2421
837f08fd
AV
2422/**
2423 * ice_probe - Device initialization routine
2424 * @pdev: PCI device information struct
2425 * @ent: entry in ice_pci_tbl
2426 *
2427 * Returns 0 on success, negative on failure
2428 */
c8b7abdd
BA
2429static int
2430ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
837f08fd 2431{
77ed84f4 2432 struct device *dev = &pdev->dev;
837f08fd
AV
2433 struct ice_pf *pf;
2434 struct ice_hw *hw;
2435 int err;
2436
fe34c89d 2437 /* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */
837f08fd
AV
2438 err = pcim_enable_device(pdev);
2439 if (err)
2440 return err;
2441
2442 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
2443 if (err) {
77ed84f4 2444 dev_err(dev, "BAR0 I/O map error %d\n", err);
837f08fd
AV
2445 return err;
2446 }
2447
77ed84f4 2448 pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
837f08fd
AV
2449 if (!pf)
2450 return -ENOMEM;
2451
2f2da36e 2452 /* set up for high or low DMA */
77ed84f4 2453 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
837f08fd 2454 if (err)
77ed84f4 2455 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
837f08fd 2456 if (err) {
77ed84f4 2457 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
837f08fd
AV
2458 return err;
2459 }
2460
2461 pci_enable_pcie_error_reporting(pdev);
2462 pci_set_master(pdev);
2463
2464 pf->pdev = pdev;
2465 pci_set_drvdata(pdev, pf);
2466 set_bit(__ICE_DOWN, pf->state);
8d81fa55
AA
2467 /* Disable service task until DOWN bit is cleared */
2468 set_bit(__ICE_SERVICE_DIS, pf->state);
837f08fd
AV
2469
2470 hw = &pf->hw;
2471 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
2472 hw->back = pf;
2473 hw->vendor_id = pdev->vendor;
2474 hw->device_id = pdev->device;
2475 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2476 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2477 hw->subsystem_device_id = pdev->subsystem_device;
2478 hw->bus.device = PCI_SLOT(pdev->devfn);
2479 hw->bus.func = PCI_FUNC(pdev->devfn);
f31e4b6f
AV
2480 ice_set_ctrlq_len(hw);
2481
837f08fd
AV
2482 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
2483
7ec59eea
AV
2484#ifndef CONFIG_DYNAMIC_DEBUG
2485 if (debug < -1)
2486 hw->debug_mask = debug;
2487#endif
2488
f31e4b6f
AV
2489 err = ice_init_hw(hw);
2490 if (err) {
77ed84f4 2491 dev_err(dev, "ice_init_hw failed: %d\n", err);
f31e4b6f
AV
2492 err = -EIO;
2493 goto err_exit_unroll;
2494 }
2495
77ed84f4 2496 dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
f31e4b6f
AV
2497 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2498 hw->api_maj_ver, hw->api_min_ver);
2499
78b5713a
AV
2500 err = ice_init_pf(pf);
2501 if (err) {
2502 dev_err(dev, "ice_init_pf failed: %d\n", err);
2503 goto err_init_pf_unroll;
2504 }
940b61af 2505
e223eaec 2506 err = ice_init_pf_dcb(pf, false);
37b6f646
AV
2507 if (err) {
2508 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
2509 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
2510
2511 /* do not fail overall init if DCB init fails */
2512 err = 0;
2513 }
2514
940b61af
AV
2515 ice_determine_q_usage(pf);
2516
995c90f2 2517 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
940b61af
AV
2518 if (!pf->num_alloc_vsi) {
2519 err = -EIO;
2520 goto err_init_pf_unroll;
2521 }
2522
77ed84f4
BA
2523 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
2524 GFP_KERNEL);
940b61af
AV
2525 if (!pf->vsi) {
2526 err = -ENOMEM;
2527 goto err_init_pf_unroll;
2528 }
2529
2530 err = ice_init_interrupt_scheme(pf);
2531 if (err) {
77ed84f4 2532 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
940b61af
AV
2533 err = -EIO;
2534 goto err_init_interrupt_unroll;
2535 }
2536
8d81fa55
AA
2537 /* Driver is mostly up */
2538 clear_bit(__ICE_DOWN, pf->state);
2539
940b61af
AV
2540 /* In case of MSIX we are going to set up the misc vector right here
2541 * to handle admin queue events etc. In case of legacy and MSI
2542 * the misc functionality and queue processing is combined in
2543 * the same vector and that gets set up at open.
2544 */
ba880734
BC
2545 err = ice_req_irq_msix_misc(pf);
2546 if (err) {
2547 dev_err(dev, "setup of misc vector failed: %d\n", err);
2548 goto err_init_interrupt_unroll;
940b61af
AV
2549 }
2550
2551 /* create switch struct for the switch element created by FW on boot */
77ed84f4 2552 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
940b61af
AV
2553 if (!pf->first_sw) {
2554 err = -ENOMEM;
2555 goto err_msix_misc_unroll;
2556 }
2557
b1edc14a
MFIP
2558 if (hw->evb_veb)
2559 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
2560 else
2561 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
2562
940b61af
AV
2563 pf->first_sw->pf = pf;
2564
2565 /* record the sw_id available for later use */
2566 pf->first_sw->sw_id = hw->port_info->sw_id;
2567
3a858ba3
AV
2568 err = ice_setup_pf_sw(pf);
2569 if (err) {
2f2da36e 2570 dev_err(dev, "probe failed due to setup PF switch:%d\n", err);
3a858ba3
AV
2571 goto err_alloc_sw_unroll;
2572 }
9daf8208 2573
8d81fa55 2574 clear_bit(__ICE_SERVICE_DIS, pf->state);
9daf8208
AV
2575
2576 /* since everything is good, start the service timer */
2577 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
2578
250c3b3e
BC
2579 err = ice_init_link_events(pf->hw.port_info);
2580 if (err) {
2581 dev_err(dev, "ice_init_link_events failed: %d\n", err);
2582 goto err_alloc_sw_unroll;
2583 }
2584
c585ea42
BC
2585 ice_verify_cacheline_size(pf);
2586
837f08fd 2587 return 0;
f31e4b6f 2588
3a858ba3 2589err_alloc_sw_unroll:
8d81fa55 2590 set_bit(__ICE_SERVICE_DIS, pf->state);
3a858ba3
AV
2591 set_bit(__ICE_DOWN, pf->state);
2592 devm_kfree(&pf->pdev->dev, pf->first_sw);
940b61af
AV
2593err_msix_misc_unroll:
2594 ice_free_irq_msix_misc(pf);
2595err_init_interrupt_unroll:
2596 ice_clear_interrupt_scheme(pf);
77ed84f4 2597 devm_kfree(dev, pf->vsi);
940b61af
AV
2598err_init_pf_unroll:
2599 ice_deinit_pf(pf);
2600 ice_deinit_hw(hw);
f31e4b6f
AV
2601err_exit_unroll:
2602 pci_disable_pcie_error_reporting(pdev);
2603 return err;
837f08fd
AV
2604}
2605
2606/**
2607 * ice_remove - Device removal routine
2608 * @pdev: PCI device information struct
2609 */
2610static void ice_remove(struct pci_dev *pdev)
2611{
2612 struct ice_pf *pf = pci_get_drvdata(pdev);
81b23589 2613 int i;
837f08fd
AV
2614
2615 if (!pf)
2616 return;
2617
afd9d4ab
AV
2618 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
2619 if (!ice_is_reset_in_progress(pf->state))
2620 break;
2621 msleep(100);
2622 }
2623
837f08fd 2624 set_bit(__ICE_DOWN, pf->state);
8d81fa55 2625 ice_service_task_stop(pf);
f31e4b6f 2626
ddf30f7f
AV
2627 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2628 ice_free_vfs(pf);
0f9d5027 2629 ice_vsi_release_all(pf);
940b61af 2630 ice_free_irq_msix_misc(pf);
81b23589
DE
2631 ice_for_each_vsi(pf, i) {
2632 if (!pf->vsi[i])
2633 continue;
2634 ice_vsi_free_q_vectors(pf->vsi[i]);
2635 }
940b61af 2636 ice_deinit_pf(pf);
f31e4b6f 2637 ice_deinit_hw(&pf->hw);
ae2bdbb4 2638 ice_clear_interrupt_scheme(pf);
837f08fd
AV
2639 pci_disable_pcie_error_reporting(pdev);
2640}
2641
5995b6d0
BC
2642/**
2643 * ice_pci_err_detected - warning that PCI error has been detected
2644 * @pdev: PCI device information struct
2645 * @err: the type of PCI error
2646 *
2647 * Called to warn that something happened on the PCI bus and the error handling
2648 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
2649 */
2650static pci_ers_result_t
2651ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
2652{
2653 struct ice_pf *pf = pci_get_drvdata(pdev);
2654
2655 if (!pf) {
2656 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
2657 __func__, err);
2658 return PCI_ERS_RESULT_DISCONNECT;
2659 }
2660
2661 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
2662 ice_service_task_stop(pf);
2663
2664 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
2665 set_bit(__ICE_PFR_REQ, pf->state);
2666 ice_prepare_for_reset(pf);
2667 }
2668 }
2669
2670 return PCI_ERS_RESULT_NEED_RESET;
2671}
2672
2673/**
2674 * ice_pci_err_slot_reset - a PCI slot reset has just happened
2675 * @pdev: PCI device information struct
2676 *
2677 * Called to determine if the driver can recover from the PCI slot reset by
2678 * using a register read to determine if the device is recoverable.
2679 */
2680static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
2681{
2682 struct ice_pf *pf = pci_get_drvdata(pdev);
2683 pci_ers_result_t result;
2684 int err;
2685 u32 reg;
2686
2687 err = pci_enable_device_mem(pdev);
2688 if (err) {
2689 dev_err(&pdev->dev,
2690 "Cannot re-enable PCI device after reset, error %d\n",
2691 err);
2692 result = PCI_ERS_RESULT_DISCONNECT;
2693 } else {
2694 pci_set_master(pdev);
2695 pci_restore_state(pdev);
2696 pci_save_state(pdev);
2697 pci_wake_from_d3(pdev, false);
2698
2699 /* Check for life */
2700 reg = rd32(&pf->hw, GLGEN_RTRIG);
2701 if (!reg)
2702 result = PCI_ERS_RESULT_RECOVERED;
2703 else
2704 result = PCI_ERS_RESULT_DISCONNECT;
2705 }
2706
2707 err = pci_cleanup_aer_uncorrect_error_status(pdev);
2708 if (err)
2709 dev_dbg(&pdev->dev,
2710 "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
2711 err);
2712 /* non-fatal, continue */
2713
2714 return result;
2715}
2716
2717/**
2718 * ice_pci_err_resume - restart operations after PCI error recovery
2719 * @pdev: PCI device information struct
2720 *
2721 * Called to allow the driver to bring things back up after PCI error and/or
2722 * reset recovery have finished
2723 */
2724static void ice_pci_err_resume(struct pci_dev *pdev)
2725{
2726 struct ice_pf *pf = pci_get_drvdata(pdev);
2727
2728 if (!pf) {
2729 dev_err(&pdev->dev,
2730 "%s failed, device is unrecoverable\n", __func__);
2731 return;
2732 }
2733
2734 if (test_bit(__ICE_SUSPENDED, pf->state)) {
2735 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
2736 __func__);
2737 return;
2738 }
2739
2740 ice_do_reset(pf, ICE_RESET_PFR);
2741 ice_service_task_restart(pf);
2742 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
2743}
2744
2745/**
2746 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
2747 * @pdev: PCI device information struct
2748 */
2749static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
2750{
2751 struct ice_pf *pf = pci_get_drvdata(pdev);
2752
2753 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
2754 ice_service_task_stop(pf);
2755
2756 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
2757 set_bit(__ICE_PFR_REQ, pf->state);
2758 ice_prepare_for_reset(pf);
2759 }
2760 }
2761}
2762
2763/**
2764 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
2765 * @pdev: PCI device information struct
2766 */
2767static void ice_pci_err_reset_done(struct pci_dev *pdev)
2768{
2769 ice_pci_err_resume(pdev);
2770}
2771
837f08fd
AV
2772/* ice_pci_tbl - PCI Device ID Table
2773 *
2774 * Wildcard entries (PCI_ANY_ID) should come last
2775 * Last entry must be all 0s
2776 *
2777 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
2778 * Class, Class Mask, private data (not used) }
2779 */
2780static const struct pci_device_id ice_pci_tbl[] = {
633d7449
AV
2781 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
2782 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
2783 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
837f08fd
AV
2784 /* required last entry */
2785 { 0, }
2786};
2787MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
2788
5995b6d0
BC
2789static const struct pci_error_handlers ice_pci_err_handler = {
2790 .error_detected = ice_pci_err_detected,
2791 .slot_reset = ice_pci_err_slot_reset,
2792 .reset_prepare = ice_pci_err_reset_prepare,
2793 .reset_done = ice_pci_err_reset_done,
2794 .resume = ice_pci_err_resume
2795};
2796
837f08fd
AV
2797static struct pci_driver ice_driver = {
2798 .name = KBUILD_MODNAME,
2799 .id_table = ice_pci_tbl,
2800 .probe = ice_probe,
2801 .remove = ice_remove,
ddf30f7f 2802 .sriov_configure = ice_sriov_configure,
5995b6d0 2803 .err_handler = &ice_pci_err_handler
837f08fd
AV
2804};
2805
2806/**
2807 * ice_module_init - Driver registration routine
2808 *
2809 * ice_module_init is the first routine called when the driver is
2810 * loaded. All it does is register with the PCI subsystem.
2811 */
2812static int __init ice_module_init(void)
2813{
2814 int status;
2815
2816 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
2817 pr_info("%s\n", ice_copyright);
2818
0f9d5027 2819 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
940b61af
AV
2820 if (!ice_wq) {
2821 pr_err("Failed to create workqueue\n");
2822 return -ENOMEM;
2823 }
2824
837f08fd 2825 status = pci_register_driver(&ice_driver);
940b61af 2826 if (status) {
2f2da36e 2827 pr_err("failed to register PCI driver, err %d\n", status);
940b61af
AV
2828 destroy_workqueue(ice_wq);
2829 }
837f08fd
AV
2830
2831 return status;
2832}
2833module_init(ice_module_init);
2834
2835/**
2836 * ice_module_exit - Driver exit cleanup routine
2837 *
2838 * ice_module_exit is called just before the driver is removed
2839 * from memory.
2840 */
2841static void __exit ice_module_exit(void)
2842{
2843 pci_unregister_driver(&ice_driver);
940b61af 2844 destroy_workqueue(ice_wq);
837f08fd
AV
2845 pr_info("module unloaded\n");
2846}
2847module_exit(ice_module_exit);
3a858ba3 2848
e94d4478 2849/**
f9867df6 2850 * ice_set_mac_address - NDO callback to set MAC address
e94d4478
AV
2851 * @netdev: network interface device structure
2852 * @pi: pointer to an address structure
2853 *
2854 * Returns 0 on success, negative on failure
2855 */
2856static int ice_set_mac_address(struct net_device *netdev, void *pi)
2857{
2858 struct ice_netdev_priv *np = netdev_priv(netdev);
2859 struct ice_vsi *vsi = np->vsi;
2860 struct ice_pf *pf = vsi->back;
2861 struct ice_hw *hw = &pf->hw;
2862 struct sockaddr *addr = pi;
2863 enum ice_status status;
e94d4478 2864 u8 flags = 0;
bbb968e8 2865 int err = 0;
e94d4478
AV
2866 u8 *mac;
2867
2868 mac = (u8 *)addr->sa_data;
2869
2870 if (!is_valid_ether_addr(mac))
2871 return -EADDRNOTAVAIL;
2872
2873 if (ether_addr_equal(netdev->dev_addr, mac)) {
2874 netdev_warn(netdev, "already using mac %pM\n", mac);
2875 return 0;
2876 }
2877
2878 if (test_bit(__ICE_DOWN, pf->state) ||
5df7e45d 2879 ice_is_reset_in_progress(pf->state)) {
e94d4478
AV
2880 netdev_err(netdev, "can't set mac %pM. device not ready\n",
2881 mac);
2882 return -EBUSY;
2883 }
2884
f9867df6
AV
2885 /* When we change the MAC address we also have to update the MAC-address
2886 * based filter rules that were created for the old address. So first we
2887 * remove the old filter rule using ice_remove_mac and then create the
2888 * new filter rule using ice_add_mac; both operations go through the
2889 * ice_vsi_cfg_mac_fltr helper, which handles the add and remove
2890 * cases.
2891 */
bbb968e8 2892 status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
e94d4478
AV
2893 if (status) {
2894 err = -EADDRNOTAVAIL;
bbb968e8 2895 goto err_update_filters;
e94d4478
AV
2896 }
2897
bbb968e8 2898 status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
e94d4478
AV
2899 if (status) {
2900 err = -EADDRNOTAVAIL;
bbb968e8 2901 goto err_update_filters;
e94d4478
AV
2902 }
2903
bbb968e8 2904err_update_filters:
e94d4478 2905 if (err) {
2f2da36e 2906 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
e94d4478
AV
2907 mac);
2908 return err;
2909 }
2910
f9867df6 2911 /* change the netdev's MAC address */
e94d4478 2912 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2f2da36e 2913 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
e94d4478
AV
2914 netdev->dev_addr);
2915
f9867df6 2916 /* write new MAC address to the firmware */
e94d4478
AV
2917 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2918 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
2919 if (status) {
bbb968e8
AA
2920 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
2921 mac, status);
e94d4478
AV
2922 }
2923 return 0;
2924}
2925
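/* A sketch (not upstream code) of the filter swap in ice_set_mac_address()
 * above, extended to restore the old unicast filter if installing the new
 * one fails. It uses only the driver's ice_vsi_cfg_mac_fltr() helper; the
 * function name is hypothetical and the error handling is simplified.
 */
static int ice_example_swap_mac_fltr(struct ice_vsi *vsi, const u8 *old_mac,
				     const u8 *new_mac)
{
	if (ice_vsi_cfg_mac_fltr(vsi, old_mac, false))
		return -EADDRNOTAVAIL;

	if (ice_vsi_cfg_mac_fltr(vsi, new_mac, true)) {
		/* best effort: put the old filter back */
		ice_vsi_cfg_mac_fltr(vsi, old_mac, true);
		return -EADDRNOTAVAIL;
	}

	return 0;
}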
2926/**
2927 * ice_set_rx_mode - NDO callback to set the netdev filters
2928 * @netdev: network interface device structure
2929 */
2930static void ice_set_rx_mode(struct net_device *netdev)
2931{
2932 struct ice_netdev_priv *np = netdev_priv(netdev);
2933 struct ice_vsi *vsi = np->vsi;
2934
2935 if (!vsi)
2936 return;
2937
2938 /* Set the flags to synchronize filters;
2939 * ndo_set_rx_mode may be triggered even without a change in netdev
2940 * flags.
2941 */
2942 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
2943 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
2944 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
2945
2946 /* schedule our worker thread which will take care of
2947 * applying the new filter changes
2948 */
2949 ice_service_task_schedule(vsi->back);
2950}
2951
2952/**
2953 * ice_fdb_add - add an entry to the hardware database
2954 * @ndm: the input from the stack
2955 * @tb: pointer to array of nladdr (unused)
2956 * @dev: the net device pointer
2957 * @addr: the MAC address entry being added
f9867df6 2958 * @vid: VLAN ID
e94d4478 2959 * @flags: instructions from stack about fdb operation
99be37ed 2960 * @extack: netlink extended ack
e94d4478 2961 */
99be37ed
BA
2962static int
2963ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
2964 struct net_device *dev, const unsigned char *addr, u16 vid,
2965 u16 flags, struct netlink_ext_ack __always_unused *extack)
e94d4478
AV
2966{
2967 int err;
2968
2969 if (vid) {
2970 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
2971 return -EINVAL;
2972 }
2973 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
2974 netdev_err(dev, "FDB only supports static addresses\n");
2975 return -EINVAL;
2976 }
2977
2978 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
2979 err = dev_uc_add_excl(dev, addr);
2980 else if (is_multicast_ether_addr(addr))
2981 err = dev_mc_add_excl(dev, addr);
2982 else
2983 err = -EINVAL;
2984
2985 /* Only return duplicate errors if NLM_F_EXCL is set */
2986 if (err == -EEXIST && !(flags & NLM_F_EXCL))
2987 err = 0;
2988
2989 return err;
2990}
2991
2992/**
2993 * ice_fdb_del - delete an entry from the hardware database
2994 * @ndm: the input from the stack
2995 * @tb: pointer to array of nladdr (unused)
2996 * @dev: the net device pointer
2997 * @addr: the MAC address entry being added
f9867df6 2998 * @vid: VLAN ID
e94d4478 2999 */
c8b7abdd
BA
3000static int
3001ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
3002 struct net_device *dev, const unsigned char *addr,
3003 __always_unused u16 vid)
e94d4478
AV
3004{
3005 int err;
3006
3007 if (ndm->ndm_state & NUD_PERMANENT) {
3008 netdev_err(dev, "FDB only supports static addresses\n");
3009 return -EINVAL;
3010 }
3011
3012 if (is_unicast_ether_addr(addr))
3013 err = dev_uc_del(dev, addr);
3014 else if (is_multicast_ether_addr(addr))
3015 err = dev_mc_del(dev, addr);
3016 else
3017 err = -EINVAL;
3018
3019 return err;
3020}
3021
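/* Usage note: ice_fdb_add()/ice_fdb_del() back the iproute2 "bridge fdb"
 * commands on the PF netdev, e.g. (address and interface hypothetical):
 *
 *   bridge fdb add 01:00:5e:00:00:42 dev eth0
 *
 * Only static entries are accepted, and VLAN-qualified entries are
 * rejected by the checks above.
 */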
d76a60ba
AV
3022/**
3023 * ice_set_features - set the netdev feature flags
3024 * @netdev: ptr to the netdev being adjusted
3025 * @features: the feature set that the stack is suggesting
3026 */
c8b7abdd
BA
3027static int
3028ice_set_features(struct net_device *netdev, netdev_features_t features)
d76a60ba
AV
3029{
3030 struct ice_netdev_priv *np = netdev_priv(netdev);
3031 struct ice_vsi *vsi = np->vsi;
3032 int ret = 0;
3033
8f529ff9
TN
3034 /* Multiple features can be changed in one call so keep features in
3035 * separate if/else statements to guarantee each feature is checked
3036 */
492af0ab
MFIP
3037 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
3038 ret = ice_vsi_manage_rss_lut(vsi, true);
3039 else if (!(features & NETIF_F_RXHASH) &&
3040 netdev->features & NETIF_F_RXHASH)
3041 ret = ice_vsi_manage_rss_lut(vsi, false);
3042
d76a60ba
AV
3043 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
3044 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
3045 ret = ice_vsi_manage_vlan_stripping(vsi, true);
3046 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
3047 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
3048 ret = ice_vsi_manage_vlan_stripping(vsi, false);
8f529ff9
TN
3049
3050 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
3051 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
d76a60ba
AV
3052 ret = ice_vsi_manage_vlan_insertion(vsi);
3053 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
3054 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
3055 ret = ice_vsi_manage_vlan_insertion(vsi);
3056
3171948e
TN
3057 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3058 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3059 ret = ice_cfg_vlan_pruning(vsi, true, false);
3060 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3061 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3062 ret = ice_cfg_vlan_pruning(vsi, false, false);
3063
d76a60ba
AV
3064 return ret;
3065}
3066
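/* Each branch in ice_set_features() above tests "requested but not
 * currently set" (and the inverse) by hand. A sketch of the equivalent
 * changed-bit test using XOR; the helper name is hypothetical.
 */
static inline bool
ice_example_feature_changed(struct net_device *netdev,
			    netdev_features_t features, netdev_features_t bit)
{
	return !!((features ^ netdev->features) & bit);
}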
3067/**
f9867df6
AV
3068 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
3069 * @vsi: VSI to setup VLAN properties for
d76a60ba
AV
3070 */
3071static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
3072{
3073 int ret = 0;
3074
3075 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3076 ret = ice_vsi_manage_vlan_stripping(vsi, true);
3077 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
3078 ret = ice_vsi_manage_vlan_insertion(vsi);
3079
3080 return ret;
3081}
3082
cdedef59
AV
3083/**
3084 * ice_vsi_cfg - Setup the VSI
3085 * @vsi: the VSI being configured
3086 *
3087 * Return 0 on success and negative value on error
3088 */
0e674aeb 3089int ice_vsi_cfg(struct ice_vsi *vsi)
cdedef59
AV
3090{
3091 int err;
3092
c7f2c42b
AV
3093 if (vsi->netdev) {
3094 ice_set_rx_mode(vsi->netdev);
9ecd25c2
AV
3095
3096 err = ice_vsi_vlan_setup(vsi);
3097
c7f2c42b
AV
3098 if (err)
3099 return err;
3100 }
a629cf0a 3101 ice_vsi_cfg_dcb_rings(vsi);
03f7a986
AV
3102
3103 err = ice_vsi_cfg_lan_txqs(vsi);
cdedef59
AV
3104 if (!err)
3105 err = ice_vsi_cfg_rxqs(vsi);
3106
3107 return err;
3108}
3109
2b245cb2
AV
3110/**
3111 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3112 * @vsi: the VSI being configured
3113 */
3114static void ice_napi_enable_all(struct ice_vsi *vsi)
3115{
3116 int q_idx;
3117
3118 if (!vsi->netdev)
3119 return;
3120
b4603dbf 3121 ice_for_each_q_vector(vsi, q_idx) {
eec90376
YX
3122 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3123
3124 if (q_vector->rx.ring || q_vector->tx.ring)
3125 napi_enable(&q_vector->napi);
3126 }
2b245cb2
AV
3127}
3128
cdedef59
AV
3129/**
3130 * ice_up_complete - Finish the last steps of bringing up a connection
3131 * @vsi: The VSI being configured
3132 *
3133 * Return 0 on success and negative value on error
3134 */
3135static int ice_up_complete(struct ice_vsi *vsi)
3136{
3137 struct ice_pf *pf = vsi->back;
3138 int err;
3139
ba880734 3140 ice_vsi_cfg_msix(vsi);
cdedef59
AV
3141
3142 /* Enable only Rx rings, Tx rings were enabled by the FW when the
3143 * Tx queue group list was configured and the context bits were
3144 * programmed using ice_vsi_cfg_txqs
3145 */
3146 err = ice_vsi_start_rx_rings(vsi);
3147 if (err)
3148 return err;
3149
3150 clear_bit(__ICE_DOWN, vsi->state);
2b245cb2 3151 ice_napi_enable_all(vsi);
cdedef59
AV
3152 ice_vsi_ena_irq(vsi);
3153
3154 if (vsi->port_info &&
3155 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
3156 vsi->netdev) {
3157 ice_print_link_msg(vsi, true);
3158 netif_tx_start_all_queues(vsi->netdev);
3159 netif_carrier_on(vsi->netdev);
3160 }
3161
3162 ice_service_task_schedule(pf);
3163
1b5c19c7 3164 return 0;
cdedef59
AV
3165}
3166
fcea6f3d
AV
3167/**
3168 * ice_up - Bring the connection back up after being down
3169 * @vsi: VSI being configured
3170 */
3171int ice_up(struct ice_vsi *vsi)
3172{
3173 int err;
3174
3175 err = ice_vsi_cfg(vsi);
3176 if (!err)
3177 err = ice_up_complete(vsi);
3178
3179 return err;
3180}
3181
3182/**
3183 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
3184 * @ring: Tx or Rx ring to read stats from
3185 * @pkts: packets stats counter
3186 * @bytes: bytes stats counter
3187 *
3188 * This function fetches stats from the ring considering the atomic operations
3189 * that need to be performed to read u64 values on 32-bit machines.
3190 */
c8b7abdd
BA
3191static void
3192ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
fcea6f3d
AV
3193{
3194 unsigned int start;
3195 *pkts = 0;
3196 *bytes = 0;
3197
3198 if (!ring)
3199 return;
3200 do {
3201 start = u64_stats_fetch_begin_irq(&ring->syncp);
3202 *pkts = ring->stats.pkts;
3203 *bytes = ring->stats.bytes;
3204 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3205}
3206
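/* Usage sketch for the reader above: total packets across a VSI's Tx
 * rings. The u64_stats seqcount in ring->syncp makes the 64-bit reads
 * tear-free on 32-bit hosts; the function name is hypothetical.
 */
static u64 ice_example_total_tx_pkts(struct ice_vsi *vsi)
{
	u64 total = 0, pkts, bytes;
	int i;

	ice_for_each_txq(vsi, i) {
		ice_fetch_u64_stats_per_ring(vsi->tx_rings[i], &pkts, &bytes);
		total += pkts;
	}
	return total;
}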
fcea6f3d
AV
3207/**
3208 * ice_update_vsi_ring_stats - Update VSI stats counters
3209 * @vsi: the VSI to be updated
3210 */
3211static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
3212{
3213 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
3214 struct ice_ring *ring;
3215 u64 pkts, bytes;
3216 int i;
3217
3218 /* reset netdev stats */
3219 vsi_stats->tx_packets = 0;
3220 vsi_stats->tx_bytes = 0;
3221 vsi_stats->rx_packets = 0;
3222 vsi_stats->rx_bytes = 0;
3223
3224 /* reset non-netdev (extended) stats */
3225 vsi->tx_restart = 0;
3226 vsi->tx_busy = 0;
3227 vsi->tx_linearize = 0;
3228 vsi->rx_buf_failed = 0;
3229 vsi->rx_page_failed = 0;
3230
3231 rcu_read_lock();
3232
3233 /* update Tx rings counters */
3234 ice_for_each_txq(vsi, i) {
3235 ring = READ_ONCE(vsi->tx_rings[i]);
3236 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
3237 vsi_stats->tx_packets += pkts;
3238 vsi_stats->tx_bytes += bytes;
3239 vsi->tx_restart += ring->tx_stats.restart_q;
3240 vsi->tx_busy += ring->tx_stats.tx_busy;
3241 vsi->tx_linearize += ring->tx_stats.tx_linearize;
3242 }
3243
3244 /* update Rx rings counters */
3245 ice_for_each_rxq(vsi, i) {
3246 ring = READ_ONCE(vsi->rx_rings[i]);
3247 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
3248 vsi_stats->rx_packets += pkts;
3249 vsi_stats->rx_bytes += bytes;
3250 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
3251 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
3252 }
3253
3254 rcu_read_unlock();
3255}
3256
3257/**
3258 * ice_update_vsi_stats - Update VSI stats counters
3259 * @vsi: the VSI to be updated
3260 */
5a4a8673 3261void ice_update_vsi_stats(struct ice_vsi *vsi)
fcea6f3d
AV
3262{
3263 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
3264 struct ice_eth_stats *cur_es = &vsi->eth_stats;
3265 struct ice_pf *pf = vsi->back;
3266
3267 if (test_bit(__ICE_DOWN, vsi->state) ||
3268 test_bit(__ICE_CFG_BUSY, pf->state))
3269 return;
3270
3271 /* get stats as recorded by Tx/Rx rings */
3272 ice_update_vsi_ring_stats(vsi);
3273
3274 /* get VSI stats as recorded by the hardware */
3275 ice_update_eth_stats(vsi);
3276
3277 cur_ns->tx_errors = cur_es->tx_errors;
3278 cur_ns->rx_dropped = cur_es->rx_discards;
3279 cur_ns->tx_dropped = cur_es->tx_discards;
3280 cur_ns->multicast = cur_es->rx_multicast;
3281
3282 /* update some more netdev stats if this is main VSI */
3283 if (vsi->type == ICE_VSI_PF) {
3284 cur_ns->rx_crc_errors = pf->stats.crc_errors;
3285 cur_ns->rx_errors = pf->stats.crc_errors +
3286 pf->stats.illegal_bytes;
3287 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
56923ab6
BC
3288 /* record drops from the port level */
3289 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
fcea6f3d
AV
3290 }
3291}
3292
3293/**
3294 * ice_update_pf_stats - Update PF port stats counters
3295 * @pf: PF whose stats needs to be updated
3296 */
5a4a8673 3297void ice_update_pf_stats(struct ice_pf *pf)
fcea6f3d
AV
3298{
3299 struct ice_hw_port_stats *prev_ps, *cur_ps;
3300 struct ice_hw *hw = &pf->hw;
9e7a5d17 3301 u8 port;
fcea6f3d 3302
9e7a5d17 3303 port = hw->port_info->lport;
fcea6f3d
AV
3304 prev_ps = &pf->stats_prev;
3305 cur_ps = &pf->stats;
fcea6f3d 3306
9e7a5d17 3307 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
36517fd3 3308 &prev_ps->eth.rx_bytes,
fcea6f3d
AV
3309 &cur_ps->eth.rx_bytes);
3310
9e7a5d17 3311 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
36517fd3 3312 &prev_ps->eth.rx_unicast,
fcea6f3d
AV
3313 &cur_ps->eth.rx_unicast);
3314
9e7a5d17 3315 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
36517fd3 3316 &prev_ps->eth.rx_multicast,
fcea6f3d
AV
3317 &cur_ps->eth.rx_multicast);
3318
9e7a5d17 3319 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
36517fd3 3320 &prev_ps->eth.rx_broadcast,
fcea6f3d
AV
3321 &cur_ps->eth.rx_broadcast);
3322
56923ab6
BC
3323 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
3324 &prev_ps->eth.rx_discards,
3325 &cur_ps->eth.rx_discards);
3326
9e7a5d17 3327 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
36517fd3 3328 &prev_ps->eth.tx_bytes,
fcea6f3d
AV
3329 &cur_ps->eth.tx_bytes);
3330
9e7a5d17 3331 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
36517fd3 3332 &prev_ps->eth.tx_unicast,
fcea6f3d
AV
3333 &cur_ps->eth.tx_unicast);
3334
9e7a5d17 3335 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
36517fd3 3336 &prev_ps->eth.tx_multicast,
fcea6f3d
AV
3337 &cur_ps->eth.tx_multicast);
3338
9e7a5d17 3339 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
36517fd3 3340 &prev_ps->eth.tx_broadcast,
fcea6f3d
AV
3341 &cur_ps->eth.tx_broadcast);
3342
9e7a5d17 3343 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
fcea6f3d
AV
3344 &prev_ps->tx_dropped_link_down,
3345 &cur_ps->tx_dropped_link_down);
3346
9e7a5d17 3347 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
36517fd3 3348 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
fcea6f3d 3349
9e7a5d17 3350 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
36517fd3 3351 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
fcea6f3d 3352
9e7a5d17 3353 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
36517fd3 3354 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
fcea6f3d 3355
9e7a5d17 3356 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
36517fd3 3357 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
fcea6f3d 3358
9e7a5d17 3359 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
fcea6f3d
AV
3360 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
3361
9e7a5d17 3362 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
3363 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
3364
9e7a5d17 3365 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
3366 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
3367
9e7a5d17 3368 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
36517fd3 3369 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
fcea6f3d 3370
9e7a5d17 3371 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
36517fd3 3372 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
fcea6f3d 3373
9e7a5d17 3374 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
36517fd3 3375 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
fcea6f3d 3376
9e7a5d17 3377 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
36517fd3 3378 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
fcea6f3d 3379
9e7a5d17 3380 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
fcea6f3d
AV
3381 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
3382
9e7a5d17 3383 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
3384 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
3385
9e7a5d17 3386 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
fcea6f3d
AV
3387 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
3388
9e7a5d17 3389 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3390 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
3391
9e7a5d17 3392 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3393 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
3394
9e7a5d17 3395 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3396 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
3397
9e7a5d17 3398 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3399 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
3400
4b0fdceb
AV
3401 ice_update_dcb_stats(pf);
3402
9e7a5d17 3403 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
fcea6f3d
AV
3404 &prev_ps->crc_errors, &cur_ps->crc_errors);
3405
9e7a5d17 3406 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3407 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
3408
9e7a5d17 3409 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3410 &prev_ps->mac_local_faults,
3411 &cur_ps->mac_local_faults);
3412
9e7a5d17 3413 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3414 &prev_ps->mac_remote_faults,
3415 &cur_ps->mac_remote_faults);
3416
9e7a5d17 3417 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3418 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
3419
9e7a5d17 3420 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3421 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
3422
9e7a5d17 3423 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3424 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
3425
9e7a5d17 3426 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3427 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
3428
9e7a5d17 3429 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
fcea6f3d
AV
3430 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
3431
3432 pf->stat_prev_loaded = true;
3433}
3434
3435/**
3436 * ice_get_stats64 - get statistics for network device structure
3437 * @netdev: network interface device structure
3438 * @stats: main device statistics structure
3439 */
3440static
3441void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3442{
3443 struct ice_netdev_priv *np = netdev_priv(netdev);
3444 struct rtnl_link_stats64 *vsi_stats;
3445 struct ice_vsi *vsi = np->vsi;
3446
3447 vsi_stats = &vsi->net_stats;
3448
3449 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
3450 return;
3451 /* netdev packet/byte stats come from ring counter. These are obtained
3452 * by summing up ring counters (done by ice_update_vsi_ring_stats).
3453 */
3454 ice_update_vsi_ring_stats(vsi);
3455 stats->tx_packets = vsi_stats->tx_packets;
3456 stats->tx_bytes = vsi_stats->tx_bytes;
3457 stats->rx_packets = vsi_stats->rx_packets;
3458 stats->rx_bytes = vsi_stats->rx_bytes;
3459
3460 /* The rest of the stats can be read from the hardware but instead we
3461 * just return values that the watchdog task has already obtained from
3462 * the hardware.
3463 */
3464 stats->multicast = vsi_stats->multicast;
3465 stats->tx_errors = vsi_stats->tx_errors;
3466 stats->tx_dropped = vsi_stats->tx_dropped;
3467 stats->rx_errors = vsi_stats->rx_errors;
3468 stats->rx_dropped = vsi_stats->rx_dropped;
3469 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
3470 stats->rx_length_errors = vsi_stats->rx_length_errors;
3471}
3472
2b245cb2
AV
3473/**
3474 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3475 * @vsi: VSI having NAPI disabled
3476 */
3477static void ice_napi_disable_all(struct ice_vsi *vsi)
3478{
3479 int q_idx;
3480
3481 if (!vsi->netdev)
3482 return;
3483
0c2561c8 3484 ice_for_each_q_vector(vsi, q_idx) {
eec90376
YX
3485 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
3486
3487 if (q_vector->rx.ring || q_vector->tx.ring)
3488 napi_disable(&q_vector->napi);
3489 }
2b245cb2
AV
3490}
3491

/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
 */
int ice_down(struct ice_vsi *vsi)
{
	int i, tx_err, rx_err, link_err = 0;

	/* Caller of this function is expected to set the
	 * vsi->state __ICE_DOWN bit
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}

	ice_vsi_dis_irq(vsi);

	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
	if (tx_err)
		netdev_err(vsi->netdev,
			   "Failed to stop Tx rings, VSI %d error %d\n",
			   vsi->vsi_num, tx_err);

	rx_err = ice_vsi_stop_rx_rings(vsi);
	if (rx_err)
		netdev_err(vsi->netdev,
			   "Failed to stop Rx rings, VSI %d error %d\n",
			   vsi->vsi_num, rx_err);

	ice_napi_disable_all(vsi);

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		link_err = ice_force_phys_link_state(vsi, false);
		if (link_err)
			netdev_err(vsi->netdev,
				   "Failed to set physical link down, VSI %d error %d\n",
				   vsi->vsi_num, link_err);
	}

	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);

	ice_for_each_rxq(vsi, i)
		ice_clean_rx_ring(vsi->rx_rings[i]);

	if (tx_err || rx_err || link_err) {
		netdev_err(vsi->netdev,
			   "Failed to close VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
		return -EIO;
	}

	return 0;
}
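
/* A compressed sketch of the shutdown ordering above, with hypothetical
 * stubs standing in for the driver calls: quiesce the stack first so no
 * new transmits arrive, then stop the hardware rings and NAPI, and report
 * only one aggregate error at the end so every teardown step still runs.
 */
static int stop_tx(void)   { return 0; }	/* stand-ins for ring teardown */
static int stop_rx(void)   { return 0; }
static int drop_link(void) { return 0; }

static int vsi_down(void)
{
	int tx_err, rx_err, link_err;

	tx_err = stop_tx();	/* errors are recorded, not returned early */
	rx_err = stop_rx();
	link_err = drop_link();

	/* ring cleanup would run here regardless of earlier failures */

	return (tx_err || rx_err || link_err) ? -1 : 0;
}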

/**
 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_txq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_txq(vsi, i) {
		vsi->tx_rings[i]->netdev = vsi->netdev;
		err = ice_setup_tx_ring(vsi->tx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
 * @vsi: VSI having resources allocated
 *
 * Return 0 on success, negative on failure
 */
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
	int i, err = 0;

	if (!vsi->num_rxq) {
		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
			vsi->vsi_num);
		return -EINVAL;
	}

	ice_for_each_rxq(vsi, i) {
		vsi->rx_rings[i]->netdev = vsi->netdev;
		err = ice_setup_rx_ring(vsi->rx_rings[i]);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_open - Called when a network interface is made active
 * @vsi: the VSI to open
 *
 * Initialization of the VSI
 *
 * Returns 0 on success, negative value on error
 */
static int ice_vsi_open(struct ice_vsi *vsi)
{
	char int_name[ICE_INT_NAME_STR_LEN];
	struct ice_pf *pf = vsi->back;
	int err;

	/* allocate descriptors */
	err = ice_vsi_setup_tx_rings(vsi);
	if (err)
		goto err_setup_tx;

	err = ice_vsi_setup_rx_rings(vsi);
	if (err)
		goto err_setup_rx;

	err = ice_vsi_cfg(vsi);
	if (err)
		goto err_setup_rx;

	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
		 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
	err = ice_vsi_req_irq_msix(vsi, int_name);
	if (err)
		goto err_setup_rx;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
	if (err)
		goto err_set_qs;

	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
	if (err)
		goto err_set_qs;

	err = ice_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	ice_down(vsi);
err_set_qs:
	ice_vsi_free_irq(vsi);
err_setup_rx:
	ice_vsi_free_rx_rings(vsi);
err_setup_tx:
	ice_vsi_free_tx_rings(vsi);

	return err;
}
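
/* A minimal sketch of the staged-unwind idiom used in ice_vsi_open(): each
 * successfully completed stage adds one label to the cleanup chain, and a
 * failure jumps to the label that tears down exactly the stages that have
 * already run. acquire_a()/acquire_b() are hypothetical stand-ins.
 */
#include <stdlib.h>

static void *acquire_a(void) { return malloc(16); }
static void *acquire_b(void) { return malloc(16); }

static int open_device(void)
{
	void *a, *b;

	a = acquire_a();
	if (!a)
		goto err_a;
	b = acquire_b();
	if (!b)
		goto err_b;
	return 0;	/* both stages stay held on success */

err_b:
	free(a);	/* unwind only what was actually acquired */
err_a:
	return -1;
}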

/**
 * ice_vsi_release_all - Delete all VSIs
 * @pf: PF from which all VSIs are being removed
 */
static void ice_vsi_release_all(struct ice_pf *pf)
{
	int err, i;

	if (!pf->vsi)
		return;

	ice_for_each_vsi(pf, i) {
		if (!pf->vsi[i])
			continue;

		err = ice_vsi_release(pf->vsi[i]);
		if (err)
			dev_dbg(&pf->pdev->dev,
				"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
				i, err, pf->vsi[i]->vsi_num);
	}
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
		return 0;

	clear_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			err = ice_open(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		}
	}

	return err;
}
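
/* A small sketch of the "locked" flag convention seen in ice_ena_vsi(): a
 * helper that may be called with or without a lock held takes a bool from
 * the caller and only acquires/releases the lock itself when the caller
 * does not already hold it. Names below are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_configure(void) { return 0; }	/* stand-in for real work */

static int configure(bool locked)
{
	int err;

	if (!locked)
		pthread_mutex_lock(&cfg_lock);
	err = do_configure();	/* always runs under the lock */
	if (!locked)
		pthread_mutex_unlock(&cfg_lock);
	return err;
}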

/**
 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#else
static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			if (ice_ena_vsi(pf->vsi[v], locked))
				return -EIO;

	return 0;
}

/**
 * ice_vsi_rebuild_all - rebuild all VSIs in PF
 * @pf: the PF
 */
static int ice_vsi_rebuild_all(struct ice_pf *pf)
{
	int i;

	/* loop through pf->vsi array and reinit the VSI if found */
	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int err;

		if (!vsi)
			continue;

		err = ice_vsi_rebuild(vsi);
		if (err) {
			dev_err(&pf->pdev->dev,
				"VSI at index %d rebuild failed\n",
				vsi->idx);
			return err;
		}

		dev_info(&pf->pdev->dev,
			 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
			 vsi->idx, vsi->vsi_num);
	}

	return 0;
}

/**
 * ice_vsi_replay_all - replay the configuration of all VSIs in the PF
 * @pf: the PF
 */
static int ice_vsi_replay_all(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int i;

	/* loop through pf->vsi array and replay the VSI if found */
	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];

		if (!vsi)
			continue;

		ret = ice_replay_vsi(hw, vsi->idx);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"VSI at index %d replay failed %d\n",
				vsi->idx, ret);
			return -EIO;
		}

		/* Re-map the HW VSI number, using the VSI handle that has
		 * been previously validated in the ice_replay_vsi() call
		 * above
		 */
		vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);

		dev_info(&pf->pdev->dev,
			 "VSI at index %d filter replayed successfully - vsi_num %i\n",
			 vsi->idx, vsi->vsi_num);
	}

	/* Clean up replay filter after successful re-configuration */
	ice_replay_post(hw);
	return 0;
}

/**
 * ice_rebuild - rebuild after reset
 * @pf: PF to rebuild
 */
static void ice_rebuild(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_hw *hw = &pf->hw;
	enum ice_status ret;
	int err, i;

	if (test_bit(__ICE_DOWN, pf->state))
		goto clear_recovery;

	dev_dbg(dev, "rebuilding PF\n");

	ret = ice_init_all_ctrlq(hw);
	if (ret) {
		dev_err(dev, "control queues init failed %d\n", ret);
		goto err_init_ctrlq;
	}

	ret = ice_clear_pf_cfg(hw);
	if (ret) {
		dev_err(dev, "clear PF configuration failed %d\n", ret);
		goto err_init_ctrlq;
	}

	ice_clear_pxe_mode(hw);

	ret = ice_get_caps(hw);
	if (ret) {
		dev_err(dev, "ice_get_caps failed %d\n", ret);
		goto err_init_ctrlq;
	}

	err = ice_sched_init_port(hw->port_info);
	if (err)
		goto err_sched_init_port;

	ice_dcb_rebuild(pf);

	err = ice_vsi_rebuild_all(pf);
	if (err) {
		dev_err(dev, "ice_vsi_rebuild_all failed\n");
		goto err_vsi_rebuild;
	}

	err = ice_update_link_info(hw->port_info);
	if (err)
		dev_err(&pf->pdev->dev, "Get link status error %d\n", err);

	/* Replay all VSI configuration, including filters, after reset */
	if (ice_vsi_replay_all(pf)) {
		dev_err(&pf->pdev->dev,
			"error replaying VSI configurations with switch filter rules\n");
		goto err_vsi_rebuild;
	}

	/* start misc vector */
	err = ice_req_irq_msix_misc(pf);
	if (err) {
		dev_err(dev, "misc vector setup failed: %d\n", err);
		goto err_vsi_rebuild;
	}

	/* restart the VSIs that were rebuilt and running before the reset */
	err = ice_pf_ena_all_vsi(pf, false);
	if (err) {
		dev_err(&pf->pdev->dev, "error enabling VSIs\n");
		/* no need to disable VSIs in the teardown path of
		 * ice_rebuild() since that is already taken care of in
		 * ice_vsi_open()
		 */
		goto err_vsi_rebuild;
	}

	ice_for_each_vsi(pf, i) {
		bool link_up;

		if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
			continue;
		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
		if (link_up) {
			netif_carrier_on(pf->vsi[i]->netdev);
			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
		} else {
			netif_carrier_off(pf->vsi[i]->netdev);
			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
		}
	}

	/* if we get here, the reset flow was successful */
	clear_bit(__ICE_RESET_FAILED, pf->state);
	return;

err_vsi_rebuild:
	ice_vsi_release_all(pf);
err_sched_init_port:
	ice_sched_cleanup_all(hw);
err_init_ctrlq:
	ice_shutdown_all_ctrlq(hw);
	set_bit(__ICE_RESET_FAILED, pf->state);
clear_recovery:
	/* set this bit in PF state to control service task scheduling */
	set_bit(__ICE_NEEDS_RESTART, pf->state);
	dev_err(dev, "Rebuild failed, unload and reload driver\n");
}

/**
 * ice_change_mtu - NDO callback to change the MTU
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 count = 0;

	if (new_mtu == netdev->mtu) {
		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
		return 0;
	}

	if (new_mtu < netdev->min_mtu) {
		netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
			   netdev->min_mtu);
		return -EINVAL;
	} else if (new_mtu > netdev->max_mtu) {
		netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
			   netdev->max_mtu);
		return -EINVAL;
	}

	/* if a reset is in progress, wait for some time for it to complete */
	do {
		if (ice_is_reset_in_progress(pf->state)) {
			count++;
			usleep_range(1000, 2000);
		} else {
			break;
		}
	} while (count < 100);

	if (count == 100) {
		netdev_err(netdev, "can't change MTU. Device is busy\n");
		return -EBUSY;
	}

	netdev->mtu = new_mtu;

	/* if VSI is up, bring it down and then back up */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		int err;

		err = ice_down(vsi);
		if (err) {
			netdev_err(netdev, "change MTU if_down err %d\n", err);
			return err;
		}

		err = ice_up(vsi);
		if (err) {
			netdev_err(netdev, "change MTU if_up err %d\n", err);
			return err;
		}
	}

	netdev_info(netdev, "changed MTU to %d\n", new_mtu);
	return 0;
}
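
/* A compact userspace sketch of the bounded-wait loop above: poll a busy
 * condition a fixed number of times with a short sleep between attempts,
 * then give up with -EBUSY instead of blocking forever.
 * reset_in_progress() is a hypothetical stand-in for the real check.
 */
#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

static bool reset_in_progress(void) { return false; }

static int wait_for_reset(void)
{
	int count = 0;

	while (reset_in_progress() && count < 100) {
		count++;
		usleep(1500);	/* ~1-2 ms between polls */
	}

	return (count == 100) ? -EBUSY : 0;
}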

/**
 * ice_set_rss - Set RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_set_rss_key(hw, vsi->idx, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
					    lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot set RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}

/**
 * ice_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;

	if (seed) {
		struct ice_aqc_get_set_rss_keys *buf =
			(struct ice_aqc_get_set_rss_keys *)seed;

		status = ice_aq_get_rss_key(hw, vsi->idx, buf);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS key, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	if (lut) {
		status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
					    lut, lut_size);
		if (status) {
			dev_err(&pf->pdev->dev,
				"Cannot get RSS lut, err %d aq_err %d\n",
				status, hw->adminq.rq_last_status);
			return -EIO;
		}
	}

	return 0;
}
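
/* RSS background for the two helpers above: the lookup table (LUT) maps a
 * hash of each flow to one of the VSI's Rx queues. A common default is to
 * spread the LUT entries round-robin across the active queues, which is
 * what this hypothetical userspace sketch computes.
 */
#include <stddef.h>
#include <stdint.h>

static void fill_default_rss_lut(uint8_t *lut, size_t lut_size,
				 unsigned int num_rx_queues)
{
	for (size_t i = 0; i < lut_size; i++)
		lut[i] = (uint8_t)(i % num_rx_queues);	/* round-robin spread */
}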

/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}
4090
4091/**
4092 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
4093 * @vsi: Pointer to VSI structure
4094 * @bmode: Hardware bridge mode (VEB/VEPA)
4095 *
4096 * Returns 0 on success, negative on failure
4097 */
4098static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
4099{
4100 struct device *dev = &vsi->back->pdev->dev;
4101 struct ice_aqc_vsi_props *vsi_props;
4102 struct ice_hw *hw = &vsi->back->hw;
198a666a 4103 struct ice_vsi_ctx *ctxt;
b1edc14a 4104 enum ice_status status;
198a666a 4105 int ret = 0;
b1edc14a
MFIP
4106
4107 vsi_props = &vsi->info;
198a666a
BA
4108
4109 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
4110 if (!ctxt)
4111 return -ENOMEM;
4112
4113 ctxt->info = vsi->info;
b1edc14a
MFIP
4114
4115 if (bmode == BRIDGE_MODE_VEB)
4116 /* change from VEPA to VEB mode */
198a666a 4117 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
b1edc14a
MFIP
4118 else
4119 /* change from VEB to VEPA mode */
198a666a
BA
4120 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
4121 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
5726ca0e 4122
198a666a 4123 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
b1edc14a
MFIP
4124 if (status) {
4125 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
4126 bmode, status, hw->adminq.sq_last_status);
198a666a
BA
4127 ret = -EIO;
4128 goto out;
b1edc14a
MFIP
4129 }
4130 /* Update sw flags for book keeping */
198a666a 4131 vsi_props->sw_flags = ctxt->info.sw_flags;
b1edc14a 4132
198a666a
BA
4133out:
4134 devm_kfree(dev, ctxt);
4135 return ret;
b1edc14a
MFIP
4136}
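
/* A tiny sketch of the read-modify-write pattern above: copy the current
 * context, flip only the flag of interest, mark which section is valid,
 * and submit the whole context back. The types and constants here are
 * hypothetical simplifications of the real VSI context.
 */
#include <stdbool.h>
#include <stdint.h>

#define SW_FLAG_ALLOW_LB	0x01	/* loopback allowed (VEB) */
#define SECTION_SW_VALID	0x10	/* "switch section changed" marker */

struct vsi_ctx { uint8_t sw_flags; uint16_t valid_sections; };

static void set_bridge_mode(const struct vsi_ctx *cur, struct vsi_ctx *out,
			    bool veb)
{
	*out = *cur;			/* start from the live context */
	if (veb)
		out->sw_flags |= SW_FLAG_ALLOW_LB;
	else
		out->sw_flags &= ~SW_FLAG_ALLOW_LB;
	out->valid_sections = SECTION_SW_VALID; /* only this section applies */
}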

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates
 * the unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
			netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
				   mode, status, hw->adminq.sq_last_status);
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return -EIO;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */
static void ice_tx_timeout(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int hung_queue = -1;
	u32 i;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way dev_watchdog() does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		unsigned long trans_start;
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       trans_start + netdev->watchdog_timeo)) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues)
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	else
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_txq; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				if (hung_queue == vsi->tx_rings[i]->q_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(__ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(__ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(__ICE_DOWN, pf->state);
		set_bit(__ICE_NEEDS_RESTART, vsi->state);
		set_bit(__ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
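
/* A condensed sketch of the escalating-recovery policy above: each
 * consecutive timeout within the watchdog window requests a progressively
 * heavier reset (PF -> core -> global), and the level decays back to 1
 * once a quiet period passes without another hang. The enum and
 * request_reset() are hypothetical stand-ins.
 */
#include <stdio.h>

enum reset_kind { RESET_PF = 1, RESET_CORE, RESET_GLOBAL };

static void request_reset(enum reset_kind kind)
{
	printf("requesting reset level %d\n", kind);
}

static void on_tx_timeout(int *level, long now, long *last, long window)
{
	if (now - *last > 20 * window)	/* quiet period: start over gently */
		*level = 1;
	*last = now;

	if (*level <= RESET_GLOBAL)
		request_reset((enum reset_kind)*level);
	else
		printf("unrecoverable; disabling device\n");
	(*level)++;
}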

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	int err;

	if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n",
			   err);
		return err;
	}

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		err = ice_force_phys_link_state(vsi, true);
		if (err) {
			netdev_err(netdev,
				   "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		err = ice_aq_set_link_restart_an(pi, false, NULL);
		if (err) {
			netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
				   vsi->vsi_num, err);
			return err;
		}
		set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);
	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_header(skb) - skb->data;
	if (len & ~(ICE_TXD_MACLEN_MAX))
		goto out_rm_features;

	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(ICE_TXD_IPLEN_MAX))
		goto out_rm_features;

	if (skb->encapsulation) {
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(ICE_TXD_L4LEN_MAX))
			goto out_rm_features;

		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(ICE_TXD_IPLEN_MAX))
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
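
/* The "len & ~MAX" tests above are a mask trick: each *_MAX constant is
 * (assumed here to be) a contiguous bit mask of the header lengths the Tx
 * descriptor can encode, so any bit left over after clearing the mask
 * means the header is too long to represent and the offload must be
 * dropped. A hypothetical userspace illustration:
 */
#include <assert.h>
#include <stddef.h>

#define HDRLEN_MASK 0x7f	/* stand-in: lengths 0..127 are encodable */

static int hdr_len_ok(size_t len)
{
	return (len & ~(size_t)HDRLEN_MASK) == 0;
}

int main(void)
{
	assert(hdr_len_ok(64));		/* fits in the descriptor field */
	assert(!hdr_len_ok(200));	/* high bits set: reject offload */
	return 0;
}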
4427
cdedef59
AV
4428static const struct net_device_ops ice_netdev_ops = {
4429 .ndo_open = ice_open,
4430 .ndo_stop = ice_stop,
2b245cb2 4431 .ndo_start_xmit = ice_start_xmit,
e94d4478
AV
4432 .ndo_features_check = ice_features_check,
4433 .ndo_set_rx_mode = ice_set_rx_mode,
4434 .ndo_set_mac_address = ice_set_mac_address,
4435 .ndo_validate_addr = eth_validate_addr,
4436 .ndo_change_mtu = ice_change_mtu,
fcea6f3d 4437 .ndo_get_stats64 = ice_get_stats64,
7c710869
AV
4438 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
4439 .ndo_set_vf_mac = ice_set_vf_mac,
4440 .ndo_get_vf_config = ice_get_vf_cfg,
4441 .ndo_set_vf_trust = ice_set_vf_trust,
4442 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
4443 .ndo_set_vf_link_state = ice_set_vf_link_state,
d76a60ba
AV
4444 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
4445 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
4446 .ndo_set_features = ice_set_features,
b1edc14a
MFIP
4447 .ndo_bridge_getlink = ice_bridge_getlink,
4448 .ndo_bridge_setlink = ice_bridge_setlink,
e94d4478
AV
4449 .ndo_fdb_add = ice_fdb_add,
4450 .ndo_fdb_del = ice_fdb_del,
b3969fd7 4451 .ndo_tx_timeout = ice_tx_timeout,
cdedef59 4452};