ice: Set carrier state and start/stop queues in rebuild
drivers/net/ethernet/intel/ice/ice_main.c
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* Intel(R) Ethernet Connection E800 Series Linux Driver */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include "ice.h"
9#include "ice_lib.h"
10
11#define DRV_VERSION "0.7.2-k"
12#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
13const char ice_drv_ver[] = DRV_VERSION;
14static const char ice_driver_string[] = DRV_SUMMARY;
15static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
16
17MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
18MODULE_DESCRIPTION(DRV_SUMMARY);
19MODULE_LICENSE("GPL v2");
20MODULE_VERSION(DRV_VERSION);
21
22static int debug = -1;
23module_param(debug, int, 0644);
24#ifndef CONFIG_DYNAMIC_DEBUG
25MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
26#else
27MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
28#endif /* !CONFIG_DYNAMIC_DEBUG */
29
30static struct workqueue_struct *ice_wq;
31static const struct net_device_ops ice_netdev_ops;
32
33static void ice_pf_dis_all_vsi(struct ice_pf *pf);
34static void ice_rebuild(struct ice_pf *pf);
35
36static void ice_vsi_release_all(struct ice_pf *pf);
37static void ice_update_vsi_stats(struct ice_vsi *vsi);
38static void ice_update_pf_stats(struct ice_pf *pf);
39
40/**
41 * ice_get_tx_pending - returns number of Tx descriptors not processed
42 * @ring: the ring of descriptors
43 */
44static u32 ice_get_tx_pending(struct ice_ring *ring)
45{
46 u32 head, tail;
47
48 head = ring->next_to_clean;
49 tail = readl(ring->tail);
50
51 if (head != tail)
52 return (head < tail) ?
53 tail - head : (tail + ring->count - head);
54 return 0;
55}
56
57/**
58 * ice_check_for_hang_subtask - check for and recover hung queues
59 * @pf: pointer to PF struct
60 */
61static void ice_check_for_hang_subtask(struct ice_pf *pf)
62{
63 struct ice_vsi *vsi = NULL;
64 unsigned int i;
65 u32 v, v_idx;
66 int packets;
67
68 ice_for_each_vsi(pf, v)
69 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
70 vsi = pf->vsi[v];
71 break;
72 }
73
74 if (!vsi || test_bit(__ICE_DOWN, vsi->state))
75 return;
76
77 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
78 return;
79
80 for (i = 0; i < vsi->num_txq; i++) {
81 struct ice_ring *tx_ring = vsi->tx_rings[i];
82
83 if (tx_ring && tx_ring->desc) {
84 int itr = ICE_ITR_NONE;
85
86 /* If packet counter has not changed the queue is
87 * likely stalled, so force an interrupt for this
88 * queue.
89 *
90 * prev_pkt would be negative if there was no
91 * pending work.
92 */
93 packets = tx_ring->stats.pkts & INT_MAX;
94 if (tx_ring->tx_stats.prev_pkt == packets) {
95 /* Trigger sw interrupt to revive the queue */
96 v_idx = tx_ring->q_vector->v_idx;
97 wr32(&vsi->back->hw,
98 GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
99 (itr << GLINT_DYN_CTL_ITR_INDX_S) |
100 GLINT_DYN_CTL_SWINT_TRIG_M |
101 GLINT_DYN_CTL_INTENA_MSK_M);
102 continue;
103 }
104
105 /* Memory barrier between read of packet count and call
106 * to ice_get_tx_pending()
107 */
108 smp_rmb();
109 tx_ring->tx_stats.prev_pkt =
110 ice_get_tx_pending(tx_ring) ? packets : -1;
111 }
112 }
113}
114
115/**
116 * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
117 * @netdev: the net device on which the sync is happening
118 * @addr: mac address to sync
119 *
120 * This is a callback function which is called by the in kernel device sync
121 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
122 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
123 * mac filters from the hardware.
124 */
125static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
126{
127 struct ice_netdev_priv *np = netdev_priv(netdev);
128 struct ice_vsi *vsi = np->vsi;
129
130 if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
131 return -EINVAL;
132
133 return 0;
134}
135
136/**
137 * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
138 * @netdev: the net device on which the unsync is happening
139 * @addr: mac address to unsync
140 *
141 * This is a callback function which is called by the in kernel device unsync
142 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
143 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
144 * delete the mac filters from the hardware.
145 */
146static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
147{
148 struct ice_netdev_priv *np = netdev_priv(netdev);
149 struct ice_vsi *vsi = np->vsi;
150
151 if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
152 return -EINVAL;
153
154 return 0;
155}
156
157/**
158 * ice_vsi_fltr_changed - check if filter state changed
159 * @vsi: VSI to be checked
160 *
161 * returns true if filter state has changed, false otherwise.
162 */
163static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
164{
165 return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
166 test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
167 test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
168}
169
170/**
171 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
172 * @vsi: ptr to the VSI
173 *
174 * Push any outstanding VSI filter changes through the AdminQ.
175 */
176static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
177{
178 struct device *dev = &vsi->back->pdev->dev;
179 struct net_device *netdev = vsi->netdev;
180 bool promisc_forced_on = false;
181 struct ice_pf *pf = vsi->back;
182 struct ice_hw *hw = &pf->hw;
183 enum ice_status status = 0;
184 u32 changed_flags = 0;
185 int err = 0;
186
187 if (!vsi->netdev)
188 return -EINVAL;
189
190 while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
191 usleep_range(1000, 2000);
192
193 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
194 vsi->current_netdev_flags = vsi->netdev->flags;
195
196 INIT_LIST_HEAD(&vsi->tmp_sync_list);
197 INIT_LIST_HEAD(&vsi->tmp_unsync_list);
198
199 if (ice_vsi_fltr_changed(vsi)) {
200 clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
201 clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
202 clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
203
204 /* grab the netdev's addr_list_lock */
205 netif_addr_lock_bh(netdev);
206 __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
207 ice_add_mac_to_unsync_list);
208 __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
209 ice_add_mac_to_unsync_list);
210 /* our temp lists are populated. release lock */
211 netif_addr_unlock_bh(netdev);
212 }
213
214 /* Remove mac addresses in the unsync list */
215 status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
216 ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
217 if (status) {
218 netdev_err(netdev, "Failed to delete MAC filters\n");
219 /* if we failed because of alloc failures, just bail */
220 if (status == ICE_ERR_NO_MEMORY) {
221 err = -ENOMEM;
222 goto out;
223 }
224 }
225
226 /* Add mac addresses in the sync list */
227 status = ice_add_mac(hw, &vsi->tmp_sync_list);
228 ice_free_fltr_list(dev, &vsi->tmp_sync_list);
229 if (status) {
230 netdev_err(netdev, "Failed to add MAC filters\n");
231 /* If there is no more space for new umac filters, vsi
232 * should go into promiscuous mode. There should be some
233 * space reserved for promiscuous filters.
234 */
235 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
236 !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
237 vsi->state)) {
238 promisc_forced_on = true;
239 netdev_warn(netdev,
240 "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
241 vsi->vsi_num);
242 } else {
243 err = -EIO;
244 goto out;
245 }
246 }
247 /* check for changes in promiscuous modes */
248 if (changed_flags & IFF_ALLMULTI)
249 netdev_warn(netdev, "Unsupported configuration\n");
250
251 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
252 test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
253 clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
254 if (vsi->current_netdev_flags & IFF_PROMISC) {
255 /* Apply TX filter rule to get traffic from VMs */
256 status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
257 ICE_FLTR_TX);
258 if (status) {
259 netdev_err(netdev, "Error setting default VSI %i tx rule\n",
260 vsi->vsi_num);
261 vsi->current_netdev_flags &= ~IFF_PROMISC;
262 err = -EIO;
263 goto out_promisc;
264 }
265 /* Apply RX filter rule to get traffic from wire */
266 status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
267 ICE_FLTR_RX);
268 if (status) {
269 netdev_err(netdev, "Error setting default VSI %i rx rule\n",
270 vsi->vsi_num);
271 vsi->current_netdev_flags &= ~IFF_PROMISC;
272 err = -EIO;
273 goto out_promisc;
274 }
275 } else {
276 /* Clear TX filter rule to stop traffic from VMs */
277 status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
278 ICE_FLTR_TX);
279 if (status) {
280 netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
281 vsi->vsi_num);
282 vsi->current_netdev_flags |= IFF_PROMISC;
283 err = -EIO;
284 goto out_promisc;
285 }
286 /* Clear RX filter to remove traffic from wire */
287 status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
288 ICE_FLTR_RX);
289 if (status) {
290 netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
291 vsi->vsi_num);
292 vsi->current_netdev_flags |= IFF_PROMISC;
293 err = -EIO;
294 goto out_promisc;
295 }
296 }
297 }
298 goto exit;
299
300out_promisc:
301 set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
302 goto exit;
303out:
304 /* if something went wrong then set the changed flag so we try again */
305 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
306 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
307exit:
308 clear_bit(__ICE_CFG_BUSY, vsi->state);
309 return err;
310}
311
312/**
313 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
314 * @pf: board private structure
315 */
316static void ice_sync_fltr_subtask(struct ice_pf *pf)
317{
318 int v;
319
320 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
321 return;
322
323 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
324
325 for (v = 0; v < pf->num_alloc_vsi; v++)
326 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
327 ice_vsi_sync_fltr(pf->vsi[v])) {
328 /* come back and try again later */
329 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
330 break;
331 }
332}
333
334/**
335 * ice_prepare_for_reset - prep for the core to reset
336 * @pf: board private structure
337 *
338 * Inform or close all dependent features in prep for reset.
339 */
340static void
341ice_prepare_for_reset(struct ice_pf *pf)
342{
343 struct ice_hw *hw = &pf->hw;
344
345 /* Notify VFs of impending reset */
346 if (ice_check_sq_alive(hw, &hw->mailboxq))
347 ice_vc_notify_reset(pf);
348
349 /* disable the VSIs and their queues that are not already DOWN */
350 ice_pf_dis_all_vsi(pf);
351
352 ice_shutdown_all_ctrlq(hw);
353
354 set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
355}
356
357/**
358 * ice_do_reset - Initiate one of many types of resets
359 * @pf: board private structure
360 * @reset_type: reset type requested
361 * before this function was called.
362 */
363static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
364{
365 struct device *dev = &pf->pdev->dev;
366 struct ice_hw *hw = &pf->hw;
367
368 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
369 WARN_ON(in_interrupt());
370
371 ice_prepare_for_reset(pf);
372
373 /* trigger the reset */
374 if (ice_reset(hw, reset_type)) {
375 dev_err(dev, "reset %d failed\n", reset_type);
376 set_bit(__ICE_RESET_FAILED, pf->state);
377 clear_bit(__ICE_RESET_OICR_RECV, pf->state);
378 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
379 clear_bit(__ICE_PFR_REQ, pf->state);
380 clear_bit(__ICE_CORER_REQ, pf->state);
381 clear_bit(__ICE_GLOBR_REQ, pf->state);
382 return;
383 }
384
385 /* PFR is a bit of a special case because it doesn't result in an OICR
386 * interrupt. So for PFR, rebuild after the reset and clear the reset-
387 * associated state bits.
388 */
389 if (reset_type == ICE_RESET_PFR) {
390 pf->pfr_count++;
391 ice_rebuild(pf);
392 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
393 clear_bit(__ICE_PFR_REQ, pf->state);
394 }
395}
396
397/**
398 * ice_reset_subtask - Set up for resetting the device and driver
399 * @pf: board private structure
400 */
401static void ice_reset_subtask(struct ice_pf *pf)
402{
403 enum ice_reset_req reset_type = ICE_RESET_INVAL;
404
405 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
406 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
407 * of reset is pending and sets bits in pf->state indicating the reset
408 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
409 * prepare for pending reset if not already (for PF software-initiated
410 * global resets the software should already be prepared for it as
411 * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
412 * by firmware or software on other PFs, that bit is not set so prepare
413 * for the reset now), poll for reset done, rebuild and return.
414 */
415 if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
416 clear_bit(__ICE_GLOBR_RECV, pf->state);
417 clear_bit(__ICE_CORER_RECV, pf->state);
418 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
419 ice_prepare_for_reset(pf);
420
421 /* make sure we are ready to rebuild */
422 if (ice_check_reset(&pf->hw)) {
423 set_bit(__ICE_RESET_FAILED, pf->state);
424 } else {
425 /* done with reset. start rebuild */
426 pf->hw.reset_ongoing = false;
427 ice_rebuild(pf);
428 /* clear bit to resume normal operations, but
429 * ICE_NEEDS_RESTART bit is set in case rebuild failed
430 */
431 clear_bit(__ICE_RESET_OICR_RECV, pf->state);
432 clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
433 clear_bit(__ICE_PFR_REQ, pf->state);
434 clear_bit(__ICE_CORER_REQ, pf->state);
435 clear_bit(__ICE_GLOBR_REQ, pf->state);
436 }
437
438 return;
439 }
440
441 /* No pending resets to finish processing. Check for new resets */
442 if (test_bit(__ICE_PFR_REQ, pf->state))
443 reset_type = ICE_RESET_PFR;
444 if (test_bit(__ICE_CORER_REQ, pf->state))
445 reset_type = ICE_RESET_CORER;
446 if (test_bit(__ICE_GLOBR_REQ, pf->state))
447 reset_type = ICE_RESET_GLOBR;
448 /* If no valid reset type requested just return */
449 if (reset_type == ICE_RESET_INVAL)
450 return;
451
452 /* reset if not already down or busy */
453 if (!test_bit(__ICE_DOWN, pf->state) &&
454 !test_bit(__ICE_CFG_BUSY, pf->state)) {
455 ice_do_reset(pf, reset_type);
456 }
457}
458
459/**
460 * ice_print_link_msg - print link up or down message
461 * @vsi: the VSI whose link status is being queried
462 * @isup: boolean for if the link is now up or down
463 */
464 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
465{
466 const char *speed;
467 const char *fc;
468
469 if (vsi->current_isup == isup)
470 return;
471
472 vsi->current_isup = isup;
473
474 if (!isup) {
475 netdev_info(vsi->netdev, "NIC Link is Down\n");
476 return;
477 }
478
479 switch (vsi->port_info->phy.link_info.link_speed) {
480 case ICE_AQ_LINK_SPEED_40GB:
481 speed = "40 G";
482 break;
483 case ICE_AQ_LINK_SPEED_25GB:
484 speed = "25 G";
485 break;
486 case ICE_AQ_LINK_SPEED_20GB:
487 speed = "20 G";
488 break;
489 case ICE_AQ_LINK_SPEED_10GB:
490 speed = "10 G";
491 break;
492 case ICE_AQ_LINK_SPEED_5GB:
493 speed = "5 G";
494 break;
495 case ICE_AQ_LINK_SPEED_2500MB:
496 speed = "2.5 G";
497 break;
498 case ICE_AQ_LINK_SPEED_1000MB:
499 speed = "1 G";
500 break;
501 case ICE_AQ_LINK_SPEED_100MB:
502 speed = "100 M";
503 break;
504 default:
505 speed = "Unknown";
506 break;
507 }
508
509 switch (vsi->port_info->fc.current_mode) {
510 case ICE_FC_FULL:
511 fc = "RX/TX";
512 break;
513 case ICE_FC_TX_PAUSE:
514 fc = "TX";
515 break;
516 case ICE_FC_RX_PAUSE:
517 fc = "RX";
518 break;
519 default:
520 fc = "Unknown";
521 break;
522 }
523
524 netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
525 speed, fc);
526}
527
528/**
529 * ice_vsi_link_event - update the vsi's netdev
530 * @vsi: the vsi on which the link event occurred
531 * @link_up: whether or not the vsi needs to be set up or down
532 */
533static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
534{
535 if (!vsi || test_bit(__ICE_DOWN, vsi->state))
536 return;
537
538 if (vsi->type == ICE_VSI_PF) {
539 if (!vsi->netdev) {
540 dev_dbg(&vsi->back->pdev->dev,
541 "vsi->netdev is not initialized!\n");
542 return;
543 }
544 if (link_up) {
545 netif_carrier_on(vsi->netdev);
546 netif_tx_wake_all_queues(vsi->netdev);
547 } else {
548 netif_carrier_off(vsi->netdev);
549 netif_tx_stop_all_queues(vsi->netdev);
550 }
551 }
552}
553
554/**
555 * ice_link_event - process the link event
556 * @pf: pf that the link event is associated with
557 * @pi: port_info for the port that the link event is associated with
558 *
559 * Returns -EIO if ice_get_link_status() fails
560 * Returns 0 on success
561 */
562static int
563ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
564{
565 u8 new_link_speed, old_link_speed;
566 struct ice_phy_info *phy_info;
567 bool new_link_same_as_old;
568 bool new_link, old_link;
569 u8 lport;
570 u16 v;
571
572 phy_info = &pi->phy;
573 phy_info->link_info_old = phy_info->link_info;
574 /* Force ice_get_link_status() to update link info */
575 phy_info->get_link_info = true;
576
577 old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
578 old_link_speed = phy_info->link_info_old.link_speed;
579
580 lport = pi->lport;
581 if (ice_get_link_status(pi, &new_link)) {
582 dev_dbg(&pf->pdev->dev,
583 "Could not get link status for port %d\n", lport);
584 return -EIO;
585 }
586
587 new_link_speed = phy_info->link_info.link_speed;
588
589 new_link_same_as_old = (new_link == old_link &&
590 new_link_speed == old_link_speed);
591
592 ice_for_each_vsi(pf, v) {
593 struct ice_vsi *vsi = pf->vsi[v];
594
595 if (!vsi || !vsi->port_info)
596 continue;
597
598 if (new_link_same_as_old &&
599 (test_bit(__ICE_DOWN, vsi->state) ||
600 new_link == netif_carrier_ok(vsi->netdev)))
601 continue;
602
603 if (vsi->port_info->lport == lport) {
604 ice_print_link_msg(vsi, new_link);
605 ice_vsi_link_event(vsi, new_link);
606 }
607 }
608
609 ice_vc_notify_link_state(pf);
610
611 return 0;
612}
613
614/**
615 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
616 * @pf: board private structure
617 */
618static void ice_watchdog_subtask(struct ice_pf *pf)
619{
620 int i;
621
622 /* if interface is down do nothing */
623 if (test_bit(__ICE_DOWN, pf->state) ||
624 test_bit(__ICE_CFG_BUSY, pf->state))
625 return;
626
627 /* make sure we don't do these things too often */
628 if (time_before(jiffies,
629 pf->serv_tmr_prev + pf->serv_tmr_period))
630 return;
631
632 pf->serv_tmr_prev = jiffies;
633
634 if (ice_link_event(pf, pf->hw.port_info))
635 dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
636
637 /* Update the stats for active netdevs so the network stack
638 * can look at updated numbers whenever it cares to
639 */
640 ice_update_pf_stats(pf);
641 for (i = 0; i < pf->num_alloc_vsi; i++)
642 if (pf->vsi[i] && pf->vsi[i]->netdev)
643 ice_update_vsi_stats(pf->vsi[i]);
644}
645
646/**
647 * __ice_clean_ctrlq - helper function to clean controlq rings
648 * @pf: ptr to struct ice_pf
649 * @q_type: specific Control queue type
650 */
651static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
652{
653 struct ice_rq_event_info event;
654 struct ice_hw *hw = &pf->hw;
655 struct ice_ctl_q_info *cq;
656 u16 pending, i = 0;
657 const char *qtype;
658 u32 oldval, val;
659
660 /* Do not clean control queue if/when PF reset fails */
661 if (test_bit(__ICE_RESET_FAILED, pf->state))
662 return 0;
663
664 switch (q_type) {
665 case ICE_CTL_Q_ADMIN:
666 cq = &hw->adminq;
667 qtype = "Admin";
668 break;
669 case ICE_CTL_Q_MAILBOX:
670 cq = &hw->mailboxq;
671 qtype = "Mailbox";
672 break;
673 default:
674 dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
675 q_type);
676 return 0;
677 }
678
679 /* check for error indications - PF_xx_AxQLEN register layout for
680 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
681 */
682 val = rd32(hw, cq->rq.len);
683 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
684 PF_FW_ARQLEN_ARQCRIT_M)) {
685 oldval = val;
686 if (val & PF_FW_ARQLEN_ARQVFE_M)
687 dev_dbg(&pf->pdev->dev,
688 "%s Receive Queue VF Error detected\n", qtype);
689 if (val & PF_FW_ARQLEN_ARQOVFL_M) {
690 dev_dbg(&pf->pdev->dev,
691 "%s Receive Queue Overflow Error detected\n",
692 qtype);
693 }
694 if (val & PF_FW_ARQLEN_ARQCRIT_M)
695 dev_dbg(&pf->pdev->dev,
696 "%s Receive Queue Critical Error detected\n",
697 qtype);
698 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
699 PF_FW_ARQLEN_ARQCRIT_M);
700 if (oldval != val)
701 wr32(hw, cq->rq.len, val);
702 }
703
704 val = rd32(hw, cq->sq.len);
705 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
706 PF_FW_ATQLEN_ATQCRIT_M)) {
707 oldval = val;
708 if (val & PF_FW_ATQLEN_ATQVFE_M)
709 dev_dbg(&pf->pdev->dev,
710 "%s Send Queue VF Error detected\n", qtype);
711 if (val & PF_FW_ATQLEN_ATQOVFL_M) {
712 dev_dbg(&pf->pdev->dev,
713 "%s Send Queue Overflow Error detected\n",
714 qtype);
715 }
716 if (val & PF_FW_ATQLEN_ATQCRIT_M)
717 dev_dbg(&pf->pdev->dev,
718 "%s Send Queue Critical Error detected\n",
719 qtype);
720 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
721 PF_FW_ATQLEN_ATQCRIT_M);
722 if (oldval != val)
723 wr32(hw, cq->sq.len, val);
724 }
725
726 event.buf_len = cq->rq_buf_size;
727 event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
728 GFP_KERNEL);
729 if (!event.msg_buf)
730 return 0;
731
732 do {
733 enum ice_status ret;
734 u16 opcode;
735
736 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
737 if (ret == ICE_ERR_AQ_NO_WORK)
738 break;
739 if (ret) {
740 dev_err(&pf->pdev->dev,
741 "%s Receive Queue event error %d\n", qtype,
742 ret);
743 break;
744 }
745
746 opcode = le16_to_cpu(event.desc.opcode);
747
748 switch (opcode) {
749 case ice_mbx_opc_send_msg_to_pf:
750 ice_vc_process_vf_msg(pf, &event);
751 break;
752 case ice_aqc_opc_fw_logging:
753 ice_output_fw_log(hw, &event.desc, event.msg_buf);
754 break;
755 default:
756 dev_dbg(&pf->pdev->dev,
757 "%s Receive Queue unknown event 0x%04x ignored\n",
758 qtype, opcode);
759 break;
760 }
761 } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
762
763 devm_kfree(&pf->pdev->dev, event.msg_buf);
764
765 return pending && (i == ICE_DFLT_IRQ_WORK);
766}
767
768/**
769 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
770 * @hw: pointer to hardware info
771 * @cq: control queue information
772 *
773 * returns true if there are pending messages in a queue, false if there aren't
774 */
775static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
776{
777 u16 ntu;
778
779 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
780 return cq->rq.next_to_clean != ntu;
781}
782
783/**
784 * ice_clean_adminq_subtask - clean the AdminQ rings
785 * @pf: board private structure
786 */
787static void ice_clean_adminq_subtask(struct ice_pf *pf)
788{
789 struct ice_hw *hw = &pf->hw;
790
791 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
792 return;
793
794 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
795 return;
796
797 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
798
799 /* There might be a situation where new messages arrive to a control
800 * queue between processing the last message and clearing the
801 * EVENT_PENDING bit. So before exiting, check queue head again (using
802 * ice_ctrlq_pending) and process new messages if any.
803 */
804 if (ice_ctrlq_pending(hw, &hw->adminq))
805 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
806
807 ice_flush(hw);
808}
809
810/**
811 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
812 * @pf: board private structure
813 */
814static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
815{
816 struct ice_hw *hw = &pf->hw;
817
818 if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
819 return;
820
821 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
822 return;
823
824 clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
825
826 if (ice_ctrlq_pending(hw, &hw->mailboxq))
827 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
828
829 ice_flush(hw);
830}
831
832/**
833 * ice_service_task_schedule - schedule the service task to wake up
834 * @pf: board private structure
835 *
836 * If not already scheduled, this puts the task into the work queue.
837 */
838static void ice_service_task_schedule(struct ice_pf *pf)
839{
840 if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
841 !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
842 !test_bit(__ICE_NEEDS_RESTART, pf->state))
843 queue_work(ice_wq, &pf->serv_task);
844}
845
846/**
847 * ice_service_task_complete - finish up the service task
848 * @pf: board private structure
849 */
850static void ice_service_task_complete(struct ice_pf *pf)
851{
852 WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
853
854 /* force memory (pf->state) to sync before next service task */
855 smp_mb__before_atomic();
856 clear_bit(__ICE_SERVICE_SCHED, pf->state);
857}
858
859/**
860 * ice_service_task_stop - stop service task and cancel works
861 * @pf: board private structure
862 */
863static void ice_service_task_stop(struct ice_pf *pf)
864{
865 set_bit(__ICE_SERVICE_DIS, pf->state);
866
867 if (pf->serv_tmr.function)
868 del_timer_sync(&pf->serv_tmr);
869 if (pf->serv_task.func)
870 cancel_work_sync(&pf->serv_task);
871
872 clear_bit(__ICE_SERVICE_SCHED, pf->state);
873}
874
875/**
876 * ice_service_timer - timer callback to schedule service task
877 * @t: pointer to timer_list
878 */
879static void ice_service_timer(struct timer_list *t)
880{
881 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
882
883 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
884 ice_service_task_schedule(pf);
885}
886
887/**
888 * ice_handle_mdd_event - handle malicious driver detect event
889 * @pf: pointer to the PF structure
890 *
891 * Called from service task. OICR interrupt handler indicates MDD event
892 */
893static void ice_handle_mdd_event(struct ice_pf *pf)
894{
895 struct ice_hw *hw = &pf->hw;
896 bool mdd_detected = false;
897 u32 reg;
898 int i;
899
900 if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
901 return;
902
903 /* find what triggered the MDD event */
904 reg = rd32(hw, GL_MDET_TX_PQM);
905 if (reg & GL_MDET_TX_PQM_VALID_M) {
906 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
907 GL_MDET_TX_PQM_PF_NUM_S;
908 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
909 GL_MDET_TX_PQM_VF_NUM_S;
910 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
911 GL_MDET_TX_PQM_MAL_TYPE_S;
912 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
913 GL_MDET_TX_PQM_QNUM_S);
914
915 if (netif_msg_tx_err(pf))
916 dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
917 event, queue, pf_num, vf_num);
918 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
919 mdd_detected = true;
920 }
921
922 reg = rd32(hw, GL_MDET_TX_TCLAN);
923 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
924 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
925 GL_MDET_TX_TCLAN_PF_NUM_S;
926 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
927 GL_MDET_TX_TCLAN_VF_NUM_S;
928 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
929 GL_MDET_TX_TCLAN_MAL_TYPE_S;
930 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
931 GL_MDET_TX_TCLAN_QNUM_S);
932
933 if (netif_msg_tx_err(pf))
934 dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
935 event, queue, pf_num, vf_num);
936 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
937 mdd_detected = true;
938 }
939
940 reg = rd32(hw, GL_MDET_RX);
941 if (reg & GL_MDET_RX_VALID_M) {
942 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
943 GL_MDET_RX_PF_NUM_S;
944 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
945 GL_MDET_RX_VF_NUM_S;
946 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
947 GL_MDET_RX_MAL_TYPE_S;
948 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
949 GL_MDET_RX_QNUM_S);
950
951 if (netif_msg_rx_err(pf))
952 dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
953 event, queue, pf_num, vf_num);
954 wr32(hw, GL_MDET_RX, 0xffffffff);
955 mdd_detected = true;
956 }
957
958 if (mdd_detected) {
959 bool pf_mdd_detected = false;
960
961 reg = rd32(hw, PF_MDET_TX_PQM);
962 if (reg & PF_MDET_TX_PQM_VALID_M) {
963 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
964 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
965 pf_mdd_detected = true;
966 }
967
968 reg = rd32(hw, PF_MDET_TX_TCLAN);
969 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
970 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
971 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
972 pf_mdd_detected = true;
973 }
974
975 reg = rd32(hw, PF_MDET_RX);
976 if (reg & PF_MDET_RX_VALID_M) {
977 wr32(hw, PF_MDET_RX, 0xFFFF);
978 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
979 pf_mdd_detected = true;
980 }
981 /* Queue belongs to the PF initiate a reset */
982 if (pf_mdd_detected) {
983 set_bit(__ICE_NEEDS_RESTART, pf->state);
984 ice_service_task_schedule(pf);
985 }
986 }
987
988 /* see if one of the VFs needs to be reset */
989 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
990 struct ice_vf *vf = &pf->vf[i];
991
992 reg = rd32(hw, VP_MDET_TX_PQM(i));
993 if (reg & VP_MDET_TX_PQM_VALID_M) {
994 wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
995 vf->num_mdd_events++;
996 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
997 i);
998 }
999
1000 reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1001 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1002 wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1003 vf->num_mdd_events++;
1004 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1005 i);
1006 }
1007
1008 reg = rd32(hw, VP_MDET_TX_TDPU(i));
1009 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1010 wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1011 vf->num_mdd_events++;
1012 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
1013 i);
1014 }
1015
1016 reg = rd32(hw, VP_MDET_RX(i));
1017 if (reg & VP_MDET_RX_VALID_M) {
1018 wr32(hw, VP_MDET_RX(i), 0xFFFF);
1019 vf->num_mdd_events++;
1020 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
1021 i);
1022 }
1023
1024 if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
1025 dev_info(&pf->pdev->dev,
1026 "Too many MDD events on VF %d, disabled\n", i);
1027 dev_info(&pf->pdev->dev,
1028 "Use PF Control I/F to re-enable the VF\n");
1029 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1030 }
1031 }
1032
1033 /* re-enable MDD interrupt cause */
1034 clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
1035 reg = rd32(hw, PFINT_OICR_ENA);
1036 reg |= PFINT_OICR_MAL_DETECT_M;
1037 wr32(hw, PFINT_OICR_ENA, reg);
1038 ice_flush(hw);
1039}
1040
1041/**
1042 * ice_service_task - manage and run subtasks
1043 * @work: pointer to work_struct contained by the PF struct
1044 */
1045static void ice_service_task(struct work_struct *work)
1046{
1047 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
1048 unsigned long start_time = jiffies;
1049
1050 /* subtasks */
1051
1052 /* process reset requests first */
1053 ice_reset_subtask(pf);
1054
1055 /* bail if a reset/recovery cycle is pending or rebuild failed */
1056 if (ice_is_reset_in_progress(pf->state) ||
1057 test_bit(__ICE_SUSPENDED, pf->state) ||
1058 test_bit(__ICE_NEEDS_RESTART, pf->state)) {
1059 ice_service_task_complete(pf);
1060 return;
1061 }
1062
1063 ice_check_for_hang_subtask(pf);
1064 ice_sync_fltr_subtask(pf);
1065 ice_handle_mdd_event(pf);
1066 ice_process_vflr_event(pf);
1067 ice_watchdog_subtask(pf);
1068 ice_clean_adminq_subtask(pf);
1069 ice_clean_mailboxq_subtask(pf);
1070
1071 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
1072 ice_service_task_complete(pf);
1073
1074 /* If the tasks have taken longer than one service timer period
1075 * or there is more work to be done, reset the service timer to
1076 * schedule the service task now.
1077 */
1078 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
1079 test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
1080 test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1081 test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
1082 test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
1083 mod_timer(&pf->serv_tmr, jiffies);
1084}
1085
1086/**
1087 * ice_set_ctrlq_len - helper function to set controlq length
1088 * @hw: pointer to the hw instance
1089 */
1090static void ice_set_ctrlq_len(struct ice_hw *hw)
1091{
1092 hw->adminq.num_rq_entries = ICE_AQ_LEN;
1093 hw->adminq.num_sq_entries = ICE_AQ_LEN;
1094 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
1095 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
1096 hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
1097 hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
1098 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
1099 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
1100}
1101
1102/**
1103 * ice_irq_affinity_notify - Callback for affinity changes
1104 * @notify: context as to what irq was changed
1105 * @mask: the new affinity mask
1106 *
1107 * This is a callback function used by the irq_set_affinity_notifier function
1108 * so that we may register to receive changes to the irq affinity masks.
1109 */
1110static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
1111 const cpumask_t *mask)
1112{
1113 struct ice_q_vector *q_vector =
1114 container_of(notify, struct ice_q_vector, affinity_notify);
1115
1116 cpumask_copy(&q_vector->affinity_mask, mask);
1117}
1118
1119/**
1120 * ice_irq_affinity_release - Callback for affinity notifier release
1121 * @ref: internal core kernel usage
1122 *
1123 * This is a callback function used by the irq_set_affinity_notifier function
1124 * to inform the current notification subscriber that they will no longer
1125 * receive notifications.
1126 */
1127static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
1128
1129/**
1130 * ice_vsi_ena_irq - Enable IRQ for the given VSI
1131 * @vsi: the VSI being configured
1132 */
1133static int ice_vsi_ena_irq(struct ice_vsi *vsi)
1134{
1135 struct ice_pf *pf = vsi->back;
1136 struct ice_hw *hw = &pf->hw;
1137
1138 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
1139 int i;
1140
1141 for (i = 0; i < vsi->num_q_vectors; i++)
1142 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
1143 }
1144
1145 ice_flush(hw);
1146 return 0;
1147}
1148
cdedef59
AV
1149/**
1150 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
1151 * @vsi: the VSI being configured
1152 * @basename: name for the vector
1153 */
1154static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
1155{
1156 int q_vectors = vsi->num_q_vectors;
1157 struct ice_pf *pf = vsi->back;
1158 int base = vsi->sw_base_vector;
1159 int rx_int_idx = 0;
1160 int tx_int_idx = 0;
1161 int vector, err;
1162 int irq_num;
1163
1164 for (vector = 0; vector < q_vectors; vector++) {
1165 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
1166
1167 irq_num = pf->msix_entries[base + vector].vector;
1168
1169 if (q_vector->tx.ring && q_vector->rx.ring) {
1170 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1171 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
1172 tx_int_idx++;
1173 } else if (q_vector->rx.ring) {
1174 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1175 "%s-%s-%d", basename, "rx", rx_int_idx++);
1176 } else if (q_vector->tx.ring) {
1177 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1178 "%s-%s-%d", basename, "tx", tx_int_idx++);
1179 } else {
1180 /* skip this unused q_vector */
1181 continue;
1182 }
1183 err = devm_request_irq(&pf->pdev->dev,
1184 pf->msix_entries[base + vector].vector,
1185 vsi->irq_handler, 0, q_vector->name,
1186 q_vector);
1187 if (err) {
1188 netdev_err(vsi->netdev,
1189 "MSIX request_irq failed, error: %d\n", err);
1190 goto free_q_irqs;
1191 }
1192
1193 /* register for affinity change notifications */
1194 q_vector->affinity_notify.notify = ice_irq_affinity_notify;
1195 q_vector->affinity_notify.release = ice_irq_affinity_release;
1196 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
1197
1198 /* assign the mask for this irq */
1199 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
1200 }
1201
1202 vsi->irqs_ready = true;
1203 return 0;
1204
1205free_q_irqs:
1206 while (vector) {
1207 vector--;
1208 irq_num = pf->msix_entries[base + vector].vector;
1209 irq_set_affinity_notifier(irq_num, NULL);
1210 irq_set_affinity_hint(irq_num, NULL);
1211 devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
1212 }
1213 return err;
1214}
1215
1216/**
1217 * ice_ena_misc_vector - enable the non-queue interrupts
1218 * @pf: board private structure
1219 */
1220static void ice_ena_misc_vector(struct ice_pf *pf)
1221{
1222 struct ice_hw *hw = &pf->hw;
1223 u32 val;
1224
1225 /* clear things first */
1226 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
1227 rd32(hw, PFINT_OICR); /* read to clear */
1228
1229 val = (PFINT_OICR_ECC_ERR_M |
1230 PFINT_OICR_MAL_DETECT_M |
1231 PFINT_OICR_GRST_M |
1232 PFINT_OICR_PCI_EXCEPTION_M |
1233 PFINT_OICR_VFLR_M |
1234 PFINT_OICR_HMC_ERR_M |
1235 PFINT_OICR_PE_CRITERR_M);
1236
1237 wr32(hw, PFINT_OICR_ENA, val);
1238
1239 /* SW_ITR_IDX = 0, but don't change INTENA */
1240 wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx),
1241 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
1242}
1243
1244/**
1245 * ice_misc_intr - misc interrupt handler
1246 * @irq: interrupt number
1247 * @data: pointer to a q_vector
1248 */
1249static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
1250{
1251 struct ice_pf *pf = (struct ice_pf *)data;
1252 struct ice_hw *hw = &pf->hw;
1253 irqreturn_t ret = IRQ_NONE;
1254 u32 oicr, ena_mask;
1255
1256 set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
1257 set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1258
1259 oicr = rd32(hw, PFINT_OICR);
1260 ena_mask = rd32(hw, PFINT_OICR_ENA);
1261
b3969fd7
SM
1262 if (oicr & PFINT_OICR_MAL_DETECT_M) {
1263 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
1264 set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
1265 }
007676b4
AV
1266 if (oicr & PFINT_OICR_VFLR_M) {
1267 ena_mask &= ~PFINT_OICR_VFLR_M;
1268 set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
1269 }
1270
1271 if (oicr & PFINT_OICR_GRST_M) {
1272 u32 reset;
1273
1274 /* we have a reset warning */
1275 ena_mask &= ~PFINT_OICR_GRST_M;
1276 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
1277 GLGEN_RSTAT_RESET_TYPE_S;
1278
1279 if (reset == ICE_RESET_CORER)
1280 pf->corer_count++;
1281 else if (reset == ICE_RESET_GLOBR)
1282 pf->globr_count++;
1283 else if (reset == ICE_RESET_EMPR)
1284 pf->empr_count++;
1285 else
1286 dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n",
1287 reset);
1288
1289 /* If a reset cycle isn't already in progress, we set a bit in
1290 * pf->state so that the service task can start a reset/rebuild.
1291 * We also make note of which reset happened so that peer
1292 * devices/drivers can be informed.
1293 */
1294 if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
1295 if (reset == ICE_RESET_CORER)
1296 set_bit(__ICE_CORER_RECV, pf->state);
1297 else if (reset == ICE_RESET_GLOBR)
1298 set_bit(__ICE_GLOBR_RECV, pf->state);
1299 else
1300 set_bit(__ICE_EMPR_RECV, pf->state);
1301
1302 /* There are couple of different bits at play here.
1303 * hw->reset_ongoing indicates whether the hardware is
1304 * in reset. This is set to true when a reset interrupt
1305 * is received and set back to false after the driver
1306 * has determined that the hardware is out of reset.
1307 *
1308 * __ICE_RESET_OICR_RECV in pf->state indicates
1309 * that a post reset rebuild is required before the
1310 * driver is operational again. This is set above.
1311 *
1312 * As this is the start of the reset/rebuild cycle, set
1313 * both to indicate that.
1314 */
1315 hw->reset_ongoing = true;
1316 }
1317 }
1318
1319 if (oicr & PFINT_OICR_HMC_ERR_M) {
1320 ena_mask &= ~PFINT_OICR_HMC_ERR_M;
1321 dev_dbg(&pf->pdev->dev,
1322 "HMC Error interrupt - info 0x%x, data 0x%x\n",
1323 rd32(hw, PFHMC_ERRORINFO),
1324 rd32(hw, PFHMC_ERRORDATA));
1325 }
1326
1327 /* Report and mask off any remaining unexpected interrupts */
1328 oicr &= ena_mask;
1329 if (oicr) {
1330 dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
1331 oicr);
1332 /* If a critical error is pending there is no choice but to
1333 * reset the device.
1334 */
1335 if (oicr & (PFINT_OICR_PE_CRITERR_M |
1336 PFINT_OICR_PCI_EXCEPTION_M |
1337 PFINT_OICR_ECC_ERR_M)) {
1338 set_bit(__ICE_PFR_REQ, pf->state);
1339 ice_service_task_schedule(pf);
1340 }
1341 ena_mask &= ~oicr;
1342 }
1343 ret = IRQ_HANDLED;
1344
1345 /* re-enable interrupt causes that are not handled during this pass */
1346 wr32(hw, PFINT_OICR_ENA, ena_mask);
1347 if (!test_bit(__ICE_DOWN, pf->state)) {
1348 ice_service_task_schedule(pf);
1349 ice_irq_dynamic_ena(hw, NULL, NULL);
1350 }
1351
1352 return ret;
1353}
1354
1355/**
1356 * ice_free_irq_msix_misc - Unroll misc vector setup
1357 * @pf: board private structure
1358 */
1359static void ice_free_irq_msix_misc(struct ice_pf *pf)
1360{
1361 /* disable OICR interrupt */
1362 wr32(&pf->hw, PFINT_OICR_ENA, 0);
1363 ice_flush(&pf->hw);
1364
1365 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
1366 synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector);
1367 devm_free_irq(&pf->pdev->dev,
1368 pf->msix_entries[pf->sw_oicr_idx].vector, pf);
1369 }
1370
1371 pf->num_avail_sw_msix += 1;
1372 ice_free_res(pf->sw_irq_tracker, pf->sw_oicr_idx, ICE_RES_MISC_VEC_ID);
1373 pf->num_avail_hw_msix += 1;
1374 ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID);
1375}
1376
1377/**
1378 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
1379 * @pf: board private structure
1380 *
1381 * This sets up the handler for MSIX 0, which is used to manage the
1382 * non-queue interrupts, e.g. AdminQ and errors. This is not used
1383 * when in MSI or Legacy interrupt mode.
1384 */
1385static int ice_req_irq_msix_misc(struct ice_pf *pf)
1386{
1387 struct ice_hw *hw = &pf->hw;
1388 int oicr_idx, err = 0;
1389 u8 itr_gran;
1390 u32 val;
1391
1392 if (!pf->int_name[0])
1393 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
1394 dev_driver_string(&pf->pdev->dev),
1395 dev_name(&pf->pdev->dev));
1396
1397 /* Do not request IRQ but do enable OICR interrupt since settings are
1398 * lost during reset. Note that this function is called only during
1399 * rebuild path and not while reset is in progress.
1400 */
1401 if (ice_is_reset_in_progress(pf->state))
1402 goto skip_req_irq;
1403
1404 /* reserve one vector in sw_irq_tracker for misc interrupts */
1405 oicr_idx = ice_get_res(pf, pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1406 if (oicr_idx < 0)
1407 return oicr_idx;
1408
1409 pf->num_avail_sw_msix -= 1;
1410 pf->sw_oicr_idx = oicr_idx;
1411
1412 /* reserve one vector in hw_irq_tracker for misc interrupts */
1413 oicr_idx = ice_get_res(pf, pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1414 if (oicr_idx < 0) {
1415 ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1416 pf->num_avail_sw_msix += 1;
1417 return oicr_idx;
1418 }
1419 pf->num_avail_hw_msix -= 1;
1420 pf->hw_oicr_idx = oicr_idx;
1421
1422 err = devm_request_irq(&pf->pdev->dev,
1423 pf->msix_entries[pf->sw_oicr_idx].vector,
1424 ice_misc_intr, 0, pf->int_name, pf);
1425 if (err) {
1426 dev_err(&pf->pdev->dev,
1427 "devm_request_irq for %s failed: %d\n",
1428 pf->int_name, err);
1429 ice_free_res(pf->sw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1430 pf->num_avail_sw_msix += 1;
1431 ice_free_res(pf->hw_irq_tracker, 1, ICE_RES_MISC_VEC_ID);
1432 pf->num_avail_hw_msix += 1;
1433 return err;
1434 }
1435
1436skip_req_irq:
1437 ice_ena_misc_vector(pf);
1438
1439 val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
1440 PFINT_OICR_CTL_CAUSE_ENA_M);
1441 wr32(hw, PFINT_OICR_CTL, val);
1442
1443 /* This enables Admin queue Interrupt causes */
1444 val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
1445 PFINT_FW_CTL_CAUSE_ENA_M);
1446 wr32(hw, PFINT_FW_CTL, val);
1447
1448 /* This enables Mailbox queue Interrupt causes */
1449 val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
1450 PFINT_MBX_CTL_CAUSE_ENA_M);
1451 wr32(hw, PFINT_MBX_CTL, val);
1452
1453 itr_gran = hw->itr_gran;
1454
1455 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx),
1456 ITR_TO_REG(ICE_ITR_8K, itr_gran));
1457
1458 ice_flush(hw);
1459 ice_irq_dynamic_ena(hw, NULL, NULL);
1460
1461 return 0;
1462}
1463
1464/**
1465 * ice_napi_del - Remove NAPI handler for the VSI
1466 * @vsi: VSI for which NAPI handler is to be removed
1467 */
1468static void ice_napi_del(struct ice_vsi *vsi)
1469{
1470 int v_idx;
1471
1472 if (!vsi->netdev)
1473 return;
1474
1475 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
1476 netif_napi_del(&vsi->q_vectors[v_idx]->napi);
1477}
1478
1479/**
1480 * ice_napi_add - register NAPI handler for the VSI
1481 * @vsi: VSI for which NAPI handler is to be registered
1482 *
1483 * This function is only called in the driver's load path. Registering the NAPI
1484 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
1485 * reset/rebuild, etc.)
1486 */
1487static void ice_napi_add(struct ice_vsi *vsi)
1488{
1489 int v_idx;
1490
1491 if (!vsi->netdev)
1492 return;
1493
1494 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
1495 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
1496 ice_napi_poll, NAPI_POLL_WEIGHT);
1497}
1498
1499/**
1500 * ice_cfg_netdev - Allocate, configure and register a netdev
1501 * @vsi: the VSI associated with the new netdev
1502 *
1503 * Returns 0 on success, negative value on failure
1504 */
1505static int ice_cfg_netdev(struct ice_vsi *vsi)
1506{
1507 netdev_features_t csumo_features;
1508 netdev_features_t vlano_features;
1509 netdev_features_t dflt_features;
1510 netdev_features_t tso_features;
3a858ba3
AV
1511 struct ice_netdev_priv *np;
1512 struct net_device *netdev;
1513 u8 mac_addr[ETH_ALEN];
1514 int err;
1515
1516 netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
1517 vsi->alloc_txq, vsi->alloc_rxq);
1518 if (!netdev)
1519 return -ENOMEM;
1520
1521 vsi->netdev = netdev;
1522 np = netdev_priv(netdev);
1523 np->vsi = vsi;
1524
1525 dflt_features = NETIF_F_SG |
1526 NETIF_F_HIGHDMA |
1527 NETIF_F_RXHASH;
1528
1529 csumo_features = NETIF_F_RXCSUM |
1530 NETIF_F_IP_CSUM |
1531 NETIF_F_IPV6_CSUM;
1532
1533 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
1534 NETIF_F_HW_VLAN_CTAG_TX |
1535 NETIF_F_HW_VLAN_CTAG_RX;
1536
1537 tso_features = NETIF_F_TSO;
1538
1539 /* set features that user can change */
1540 netdev->hw_features = dflt_features | csumo_features |
1541 vlano_features | tso_features;
1542
1543 /* enable features */
1544 netdev->features |= netdev->hw_features;
1545 /* encap and VLAN devices inherit default, csumo and tso features */
1546 netdev->hw_enc_features |= dflt_features | csumo_features |
1547 tso_features;
1548 netdev->vlan_features |= dflt_features | csumo_features |
1549 tso_features;
1550
1551 if (vsi->type == ICE_VSI_PF) {
1552 SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
1553 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
1554
1555 ether_addr_copy(netdev->dev_addr, mac_addr);
1556 ether_addr_copy(netdev->perm_addr, mac_addr);
1557 }
1558
1559 netdev->priv_flags |= IFF_UNICAST_FLT;
1560
1561 /* assign netdev_ops */
1562 netdev->netdev_ops = &ice_netdev_ops;
1563
1564 /* setup watchdog timeout value to be 5 second */
1565 netdev->watchdog_timeo = 5 * HZ;
1566
1567 ice_set_ethtool_ops(netdev);
1568
1569 netdev->min_mtu = ETH_MIN_MTU;
1570 netdev->max_mtu = ICE_MAX_MTU;
1571
1572 err = register_netdev(vsi->netdev);
1573 if (err)
1574 return err;
1575
1576 netif_carrier_off(vsi->netdev);
1577
1578 /* make sure transmit queues start off as stopped */
1579 netif_tx_stop_all_queues(vsi->netdev);
1580
1581 return 0;
1582}
1583
d76a60ba
AV
1584/**
1585 * ice_fill_rss_lut - Fill the RSS lookup table with default values
1586 * @lut: Lookup table
1587 * @rss_table_size: Lookup table size
1588 * @rss_size: Range of queue number for hashing
1589 */
1590void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
1591{
1592 u16 i;
1593
1594 for (i = 0; i < rss_table_size; i++)
1595 lut[i] = i % rss_size;
1596}
1597
0f9d5027
AV
1598/**
1599 * ice_pf_vsi_setup - Set up a PF VSI
1600 * @pf: board private structure
1601 * @pi: pointer to the port_info instance
1602 *
1603 * Returns pointer to the successfully allocated VSI sw struct on success,
1604 * otherwise returns NULL on failure.
1605 */
1606static struct ice_vsi *
1607ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
1608{
1609 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
1610}
1611
d76a60ba
AV
1612/**
1613 * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
1614 * @netdev: network interface to be adjusted
1615 * @proto: unused protocol
1616 * @vid: vlan id to be added
1617 *
1618 * net_device_ops implementation for adding vlan ids
1619 */
1620static int ice_vlan_rx_add_vid(struct net_device *netdev,
1621 __always_unused __be16 proto, u16 vid)
1622{
1623 struct ice_netdev_priv *np = netdev_priv(netdev);
1624 struct ice_vsi *vsi = np->vsi;
1625 int ret;
1626
1627 if (vid >= VLAN_N_VID) {
1628 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
1629 vid, VLAN_N_VID);
1630 return -EINVAL;
1631 }
1632
1633 if (vsi->info.pvid)
1634 return -EINVAL;
1635
1636 /* Enable VLAN pruning when VLAN 0 is added */
1637 if (unlikely(!vid)) {
1638 ret = ice_cfg_vlan_pruning(vsi, true);
1639 if (ret)
1640 return ret;
1641 }
1642
d76a60ba
AV
1643 /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
1644 * needed to continue allowing all untagged packets since VLAN prune
1645 * list is applied to all packets by the switch
1646 */
1647 ret = ice_vsi_add_vlan(vsi, vid);
1648
1649 if (!ret)
1650 set_bit(vid, vsi->active_vlans);
1651
1652 return ret;
1653}
1654
1655/**
1656 * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1657 * @netdev: network interface to be adjusted
1658 * @proto: unused protocol
1659 * @vid: vlan id to be removed
1660 *
1661 * net_device_ops implementation for removing vlan ids
1662 */
1663static int ice_vlan_rx_kill_vid(struct net_device *netdev,
1664 __always_unused __be16 proto, u16 vid)
1665{
1666 struct ice_netdev_priv *np = netdev_priv(netdev);
1667 struct ice_vsi *vsi = np->vsi;
1668 int status;
1669
1670 if (vsi->info.pvid)
1671 return -EINVAL;
1672
1673 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
1674 * information
1675 */
1676 status = ice_vsi_kill_vlan(vsi, vid);
1677 if (status)
1678 return status;
1679
1680 clear_bit(vid, vsi->active_vlans);
1681
1682 /* Disable VLAN pruning when VLAN 0 is removed */
1683 if (unlikely(!vid))
1684 status = ice_cfg_vlan_pruning(vsi, false);
1685
1686 return status;
1687}
1688
1689/**
1690 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
1691 * @pf: board private structure
1692 *
1693 * Returns 0 on success, negative value on failure
1694 */
1695static int ice_setup_pf_sw(struct ice_pf *pf)
1696{
1697 LIST_HEAD(tmp_add_list);
1698 u8 broadcast[ETH_ALEN];
1699 struct ice_vsi *vsi;
1700 int status = 0;
1701
1702 if (ice_is_reset_in_progress(pf->state))
1703 return -EBUSY;
1704
1705 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
1706 if (!vsi) {
1707 status = -ENOMEM;
1708 goto unroll_vsi_setup;
1709 }
1710
1711 status = ice_cfg_netdev(vsi);
1712 if (status) {
1713 status = -ENODEV;
1714 goto unroll_vsi_setup;
1715 }
1716
1717 /* registering the NAPI handler requires both the queues and
1718 * netdev to be created, which are done in ice_pf_vsi_setup()
1719 * and ice_cfg_netdev() respectively
1720 */
1721 ice_napi_add(vsi);
1722
1723 /* To add a MAC filter, first add the MAC to a list and then
1724 * pass the list to ice_add_mac.
1725 */
1726
1727 /* Add a unicast MAC filter so the VSI can get its packets */
1728 status = ice_add_mac_to_list(vsi, &tmp_add_list,
1729 vsi->port_info->mac.perm_addr);
1730 if (status)
1731 goto unroll_napi_add;
1732
1733 /* VSI needs to receive broadcast traffic, so add the broadcast
1734 * MAC address to the list as well.
1735 */
1736 eth_broadcast_addr(broadcast);
1737 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
1738 if (status)
1739 goto free_mac_list;
1740
1741 /* program MAC filters for entries in tmp_add_list */
1742 status = ice_add_mac(&pf->hw, &tmp_add_list);
1743 if (status) {
1744 dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
1745 status = -ENOMEM;
1746 goto free_mac_list;
1747 }
1748
1749 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
1750 return status;
1751
1752free_mac_list:
1753 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
1754
1755unroll_napi_add:
1756 if (vsi) {
1757 ice_napi_del(vsi);
1758 if (vsi->netdev) {
1759 if (vsi->netdev->reg_state == NETREG_REGISTERED)
1760 unregister_netdev(vsi->netdev);
1761 free_netdev(vsi->netdev);
1762 vsi->netdev = NULL;
1763 }
1764 }
1765
1766unroll_vsi_setup:
1767 if (vsi) {
1768 ice_vsi_free_q_vectors(vsi);
1769 ice_vsi_delete(vsi);
1770 ice_vsi_put_qs(vsi);
1771 pf->q_left_tx += vsi->alloc_txq;
1772 pf->q_left_rx += vsi->alloc_rxq;
1773 ice_vsi_clear(vsi);
1774 }
1775 return status;
1776}
1777
1778/**
1779 * ice_determine_q_usage - Calculate queue distribution
1780 * @pf: board private structure
1781 *
1782 * Return -ENOMEM if we don't get enough queues for all ports
1783 */
1784static void ice_determine_q_usage(struct ice_pf *pf)
1785{
1786 u16 q_left_tx, q_left_rx;
1787
1788 q_left_tx = pf->hw.func_caps.common_cap.num_txq;
1789 q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
1790
5513b920 1791 pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
d76a60ba
AV
1792
1793 /* only 1 rx queue unless RSS is enabled */
1794 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
1795 pf->num_lan_rx = 1;
1796 else
1797 pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
940b61af
AV
1798
1799 pf->q_left_tx = q_left_tx - pf->num_lan_tx;
1800 pf->q_left_rx = q_left_rx - pf->num_lan_rx;
1801}
1802
1803/**
1804 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
1805 * @pf: board private structure to initialize
1806 */
1807static void ice_deinit_pf(struct ice_pf *pf)
1808{
8d81fa55 1809 ice_service_task_stop(pf);
940b61af
AV
1810 mutex_destroy(&pf->sw_mutex);
1811 mutex_destroy(&pf->avail_q_mutex);
1812}
1813
1814/**
1815 * ice_init_pf - Initialize general software structures (struct ice_pf)
1816 * @pf: board private structure to initialize
1817 */
1818static void ice_init_pf(struct ice_pf *pf)
1819{
1820 bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
1821 set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
75d2b253
AV
1822#ifdef CONFIG_PCI_IOV
1823 if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
1824 struct ice_hw *hw = &pf->hw;
1825
1826 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
1827 pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
1828 ICE_MAX_VF_COUNT);
1829 }
1830#endif /* CONFIG_PCI_IOV */
940b61af
AV
1831
1832 mutex_init(&pf->sw_mutex);
1833 mutex_init(&pf->avail_q_mutex);
1834
1835 /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
1836 mutex_lock(&pf->avail_q_mutex);
1837 bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
1838 bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
1839 mutex_unlock(&pf->avail_q_mutex);
1840
d76a60ba
AV
1841 if (pf->hw.func_caps.common_cap.rss_table_size)
1842 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
1843
940b61af
AV
1844 /* setup service timer and periodic service task */
1845 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
1846 pf->serv_tmr_period = HZ;
1847 INIT_WORK(&pf->serv_task, ice_service_task);
1848 clear_bit(__ICE_SERVICE_SCHED, pf->state);
1849}
1850
1851/**
1852 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
1853 * @pf: board private structure
1854 *
1855 * Compute the number of MSI-X vectors required (v_budget) and request them
1856 * from the OS. Return the number of vectors reserved or negative on failure
1857 */
1858static int ice_ena_msix_range(struct ice_pf *pf)
1859{
1860 int v_left, v_actual, v_budget = 0;
1861 int needed, err, i;
1862
1863 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
1864
1865 /* reserve one vector for miscellaneous handler */
1866 needed = 1;
1867 v_budget += needed;
1868 v_left -= needed;
1869
1870 /* reserve vectors for LAN traffic */
1871 pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
1872 v_budget += pf->num_lan_msix;
eb0208ec 1873 v_left -= pf->num_lan_msix;
940b61af
AV
1874
1875 pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
1876 sizeof(struct msix_entry), GFP_KERNEL);
1877
1878 if (!pf->msix_entries) {
1879 err = -ENOMEM;
1880 goto exit_err;
1881 }
1882
1883 for (i = 0; i < v_budget; i++)
1884 pf->msix_entries[i].entry = i;
1885
1886 /* actually reserve the vectors */
1887 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
1888 ICE_MIN_MSIX, v_budget);
1889
1890 if (v_actual < 0) {
1891 dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
1892 err = v_actual;
1893 goto msix_err;
1894 }
1895
1896 if (v_actual < v_budget) {
1897 dev_warn(&pf->pdev->dev,
1898 "not enough vectors. requested = %d, obtained = %d\n",
1899 v_budget, v_actual);
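		/* Trim to what was actually granted: if the misc vector plus
		 * all requested LAN vectors still fit, any surplus feeds the
		 * SW vector pool; with only two vectors run a single LAN
		 * vector; with fewer, give up on MSI-X.
		 */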
1900 if (v_actual >= (pf->num_lan_msix + 1)) {
eb0208ec
PB
1901 pf->num_avail_sw_msix = v_actual -
1902 (pf->num_lan_msix + 1);
940b61af
AV
1903 } else if (v_actual >= 2) {
1904 pf->num_lan_msix = 1;
eb0208ec 1905 pf->num_avail_sw_msix = v_actual - 2;
940b61af
AV
1906 } else {
1907 pci_disable_msix(pf->pdev);
1908 err = -ERANGE;
1909 goto msix_err;
1910 }
1911 }
1912
1913 return v_actual;
1914
1915msix_err:
1916 devm_kfree(&pf->pdev->dev, pf->msix_entries);
1917 goto exit_err;
1918
1919exit_err:
1920 pf->num_lan_msix = 0;
1921 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
1922 return err;
1923}
1924
1925/**
1926 * ice_dis_msix - Disable MSI-X interrupt setup in OS
1927 * @pf: board private structure
1928 */
1929static void ice_dis_msix(struct ice_pf *pf)
1930{
1931 pci_disable_msix(pf->pdev);
1932 devm_kfree(&pf->pdev->dev, pf->msix_entries);
1933 pf->msix_entries = NULL;
1934 clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
1935}
1936
eb0208ec
PB
1937/**
1938 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
1939 * @pf: board private structure
1940 */
1941static void ice_clear_interrupt_scheme(struct ice_pf *pf)
1942{
1943 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1944 ice_dis_msix(pf);
1945
1946 if (pf->sw_irq_tracker) {
1947 devm_kfree(&pf->pdev->dev, pf->sw_irq_tracker);
1948 pf->sw_irq_tracker = NULL;
1949 }
1950
1951 if (pf->hw_irq_tracker) {
1952 devm_kfree(&pf->pdev->dev, pf->hw_irq_tracker);
1953 pf->hw_irq_tracker = NULL;
1954 }
1955}
1956
940b61af
AV
1957/**
1958 * ice_init_interrupt_scheme - Determine proper interrupt scheme
1959 * @pf: board private structure to initialize
1960 */
1961static int ice_init_interrupt_scheme(struct ice_pf *pf)
1962{
eb0208ec 1963 int vectors = 0, hw_vectors = 0;
940b61af
AV
1964 ssize_t size;
1965
1966 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1967 vectors = ice_ena_msix_range(pf);
1968 else
1969 return -ENODEV;
1970
1971 if (vectors < 0)
1972 return vectors;
1973
1974 /* set up vector assignment tracking */
1975 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
1976
eb0208ec
PB
1977 pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
1978 if (!pf->sw_irq_tracker) {
940b61af
AV
1979 ice_dis_msix(pf);
1980 return -ENOMEM;
1981 }
1982
eb0208ec
PB
1983 /* populate SW interrupts pool with number of OS granted IRQs. */
1984 pf->num_avail_sw_msix = vectors;
1985 pf->sw_irq_tracker->num_entries = vectors;
940b61af 1986
eb0208ec
PB
1987 /* set up HW vector assignment tracking */
1988 hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
1989 size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors);
940b61af 1990
eb0208ec
PB
1991 pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
1992 if (!pf->hw_irq_tracker) {
1993 ice_clear_interrupt_scheme(pf);
1994 return -ENOMEM;
c7f2c42b 1995 }
eb0208ec
PB
1996
1997 /* populate HW interrupts pool with number of HW supported irqs. */
1998 pf->num_avail_hw_msix = hw_vectors;
1999 pf->hw_irq_tracker->num_entries = hw_vectors;
2000
2001 return 0;
940b61af
AV
2002}
2003
837f08fd
AV
2004/**
2005 * ice_probe - Device initialization routine
2006 * @pdev: PCI device information struct
2007 * @ent: entry in ice_pci_tbl
2008 *
2009 * Returns 0 on success, negative on failure
2010 */
2011static int ice_probe(struct pci_dev *pdev,
2012 const struct pci_device_id __always_unused *ent)
2013{
2014 struct ice_pf *pf;
2015 struct ice_hw *hw;
2016 int err;
2017
2018 /* this driver uses devres, see Documentation/driver-model/devres.txt */
2019 err = pcim_enable_device(pdev);
2020 if (err)
2021 return err;
2022
2023 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
2024 if (err) {
3968540b 2025 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
837f08fd
AV
2026 return err;
2027 }
2028
2029 pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
2030 if (!pf)
2031 return -ENOMEM;
2032
2033 /* set up for high or low dma */
2034 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2035 if (err)
2036 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2037 if (err) {
2038 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
2039 return err;
2040 }
2041
2042 pci_enable_pcie_error_reporting(pdev);
2043 pci_set_master(pdev);
2044
2045 pf->pdev = pdev;
2046 pci_set_drvdata(pdev, pf);
2047 set_bit(__ICE_DOWN, pf->state);
8d81fa55
AA
2048 /* Disable service task until DOWN bit is cleared */
2049 set_bit(__ICE_SERVICE_DIS, pf->state);
837f08fd
AV
2050
2051 hw = &pf->hw;
2052 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
2053 hw->back = pf;
2054 hw->vendor_id = pdev->vendor;
2055 hw->device_id = pdev->device;
2056 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2057 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2058 hw->subsystem_device_id = pdev->subsystem_device;
2059 hw->bus.device = PCI_SLOT(pdev->devfn);
2060 hw->bus.func = PCI_FUNC(pdev->devfn);
f31e4b6f
AV
2061 ice_set_ctrlq_len(hw);
2062
837f08fd
AV
2063 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
2064
7ec59eea
AV
2065#ifndef CONFIG_DYNAMIC_DEBUG
2066 if (debug < -1)
2067 hw->debug_mask = debug;
2068#endif
2069
f31e4b6f
AV
2070 err = ice_init_hw(hw);
2071 if (err) {
2072 dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
2073 err = -EIO;
2074 goto err_exit_unroll;
2075 }
2076
2077 dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
2078 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2079 hw->api_maj_ver, hw->api_min_ver);
2080
940b61af
AV
2081 ice_init_pf(pf);
2082
2083 ice_determine_q_usage(pf);
2084
2085 pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
2086 hw->func_caps.guaranteed_num_vsi);
2087 if (!pf->num_alloc_vsi) {
2088 err = -EIO;
2089 goto err_init_pf_unroll;
2090 }
2091
2092 pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
2093 sizeof(struct ice_vsi *), GFP_KERNEL);
2094 if (!pf->vsi) {
2095 err = -ENOMEM;
2096 goto err_init_pf_unroll;
2097 }
2098
2099 err = ice_init_interrupt_scheme(pf);
2100 if (err) {
2101 dev_err(&pdev->dev,
2102 "ice_init_interrupt_scheme failed: %d\n", err);
2103 err = -EIO;
2104 goto err_init_interrupt_unroll;
2105 }
2106
8d81fa55
AA
2107 /* Driver is mostly up */
2108 clear_bit(__ICE_DOWN, pf->state);
2109
940b61af
AV
2110 /* In case of MSI-X we are going to set up the misc vector right here
2111 * to handle admin queue events etc. In case of legacy and MSI
2112 * the misc functionality and queue processing is combined in
2113 * the same vector and that gets set up at open.
2114 */
2115 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
2116 err = ice_req_irq_msix_misc(pf);
2117 if (err) {
2118 dev_err(&pdev->dev,
2119 "setup of misc vector failed: %d\n", err);
2120 goto err_init_interrupt_unroll;
2121 }
2122 }
2123
2124 /* create switch struct for the switch element created by FW on boot */
2125 pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
2126 GFP_KERNEL);
2127 if (!pf->first_sw) {
2128 err = -ENOMEM;
2129 goto err_msix_misc_unroll;
2130 }
2131
b1edc14a
MFIP
2132 if (hw->evb_veb)
2133 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
2134 else
2135 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
2136
940b61af
AV
2137 pf->first_sw->pf = pf;
2138
2139 /* record the sw_id available for later use */
2140 pf->first_sw->sw_id = hw->port_info->sw_id;
2141
3a858ba3
AV
2142 err = ice_setup_pf_sw(pf);
2143 if (err) {
2144 dev_err(&pdev->dev,
2145 "probe failed due to PF switch setup: %d\n", err);
2146 goto err_alloc_sw_unroll;
2147 }
9daf8208 2148
8d81fa55 2149 clear_bit(__ICE_SERVICE_DIS, pf->state);
9daf8208
AV
2150
2151 /* since everything is good, start the service timer */
2152 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
2153
837f08fd 2154 return 0;
f31e4b6f 2155
3a858ba3 2156err_alloc_sw_unroll:
8d81fa55 2157 set_bit(__ICE_SERVICE_DIS, pf->state);
3a858ba3
AV
2158 set_bit(__ICE_DOWN, pf->state);
2159 devm_kfree(&pf->pdev->dev, pf->first_sw);
940b61af
AV
2160err_msix_misc_unroll:
2161 ice_free_irq_msix_misc(pf);
2162err_init_interrupt_unroll:
2163 ice_clear_interrupt_scheme(pf);
2164 devm_kfree(&pdev->dev, pf->vsi);
2165err_init_pf_unroll:
2166 ice_deinit_pf(pf);
2167 ice_deinit_hw(hw);
f31e4b6f
AV
2168err_exit_unroll:
2169 pci_disable_pcie_error_reporting(pdev);
2170 return err;
837f08fd
AV
2171}
2172
2173/**
2174 * ice_remove - Device removal routine
2175 * @pdev: PCI device information struct
2176 */
2177static void ice_remove(struct pci_dev *pdev)
2178{
2179 struct ice_pf *pf = pci_get_drvdata(pdev);
81b23589 2180 int i;
837f08fd
AV
2181
2182 if (!pf)
2183 return;
2184
2185 set_bit(__ICE_DOWN, pf->state);
8d81fa55 2186 ice_service_task_stop(pf);
f31e4b6f 2187
ddf30f7f
AV
2188 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
2189 ice_free_vfs(pf);
0f9d5027 2190 ice_vsi_release_all(pf);
940b61af 2191 ice_free_irq_msix_misc(pf);
81b23589
DE
2192 ice_for_each_vsi(pf, i) {
2193 if (!pf->vsi[i])
2194 continue;
2195 ice_vsi_free_q_vectors(pf->vsi[i]);
2196 }
940b61af
AV
2197 ice_clear_interrupt_scheme(pf);
2198 ice_deinit_pf(pf);
f31e4b6f 2199 ice_deinit_hw(&pf->hw);
837f08fd
AV
2200 pci_disable_pcie_error_reporting(pdev);
2201}
2202
2203/* ice_pci_tbl - PCI Device ID Table
2204 *
2205 * Wildcard entries (PCI_ANY_ID) should come last
2206 * Last entry must be all 0s
2207 *
2208 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
2209 * Class, Class Mask, private data (not used) }
2210 */
2211static const struct pci_device_id ice_pci_tbl[] = {
633d7449
AV
2212 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
2213 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
2214 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
837f08fd
AV
2215 /* required last entry */
2216 { 0, }
2217};
2218MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
2219
2220static struct pci_driver ice_driver = {
2221 .name = KBUILD_MODNAME,
2222 .id_table = ice_pci_tbl,
2223 .probe = ice_probe,
2224 .remove = ice_remove,
ddf30f7f 2225 .sriov_configure = ice_sriov_configure,
837f08fd
AV
2226};
2227
2228/**
2229 * ice_module_init - Driver registration routine
2230 *
2231 * ice_module_init is the first routine called when the driver is
2232 * loaded. All it does is register with the PCI subsystem.
2233 */
2234static int __init ice_module_init(void)
2235{
2236 int status;
2237
2238 pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
2239 pr_info("%s\n", ice_copyright);
2240
0f9d5027 2241 ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
940b61af
AV
2242 if (!ice_wq) {
2243 pr_err("Failed to create workqueue\n");
2244 return -ENOMEM;
2245 }
2246
837f08fd 2247 status = pci_register_driver(&ice_driver);
940b61af 2248 if (status) {
837f08fd 2249 pr_err("failed to register pci driver, err %d\n", status);
940b61af
AV
2250 destroy_workqueue(ice_wq);
2251 }
837f08fd
AV
2252
2253 return status;
2254}
2255module_init(ice_module_init);
2256
2257/**
2258 * ice_module_exit - Driver exit cleanup routine
2259 *
2260 * ice_module_exit is called just before the driver is removed
2261 * from memory.
2262 */
2263static void __exit ice_module_exit(void)
2264{
2265 pci_unregister_driver(&ice_driver);
940b61af 2266 destroy_workqueue(ice_wq);
837f08fd
AV
2267 pr_info("module unloaded\n");
2268}
2269module_exit(ice_module_exit);
3a858ba3 2270
e94d4478
AV
2271/**
2272 * ice_set_mac_address - NDO callback to set mac address
2273 * @netdev: network interface device structure
2274 * @pi: pointer to an address structure
2275 *
2276 * Returns 0 on success, negative on failure
2277 */
2278static int ice_set_mac_address(struct net_device *netdev, void *pi)
2279{
2280 struct ice_netdev_priv *np = netdev_priv(netdev);
2281 struct ice_vsi *vsi = np->vsi;
2282 struct ice_pf *pf = vsi->back;
2283 struct ice_hw *hw = &pf->hw;
2284 struct sockaddr *addr = pi;
2285 enum ice_status status;
2286 LIST_HEAD(a_mac_list);
2287 LIST_HEAD(r_mac_list);
2288 u8 flags = 0;
2289 int err;
2290 u8 *mac;
2291
2292 mac = (u8 *)addr->sa_data;
2293
2294 if (!is_valid_ether_addr(mac))
2295 return -EADDRNOTAVAIL;
2296
2297 if (ether_addr_equal(netdev->dev_addr, mac)) {
2298 netdev_warn(netdev, "already using mac %pM\n", mac);
2299 return 0;
2300 }
2301
2302 if (test_bit(__ICE_DOWN, pf->state) ||
5df7e45d 2303 ice_is_reset_in_progress(pf->state)) {
e94d4478
AV
2304 netdev_err(netdev, "can't set mac %pM. device not ready\n",
2305 mac);
2306 return -EBUSY;
2307 }
2308
2309 /* When we change the mac address we also have to change the mac address
2310 * based filter rules that were created previously for the old mac
2311 * address. So first, we remove the old filter rule using ice_remove_mac
2312 * and then create a new filter rule using ice_add_mac. Note that for
2313 * both these operations, we first need to form a "list" of mac
2314 * addresses (even though in this case, we have only 1 mac address to be
2315 * added/removed) and this is done using ice_add_mac_to_list. Depending on
2316 * the ensuing operation this "list" of mac addresses is either to be
2317 * added or removed from the filter.
2318 */
2319 err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
2320 if (err) {
2321 err = -EADDRNOTAVAIL;
2322 goto free_lists;
2323 }
2324
2325 status = ice_remove_mac(hw, &r_mac_list);
2326 if (status) {
2327 err = -EADDRNOTAVAIL;
2328 goto free_lists;
2329 }
2330
2331 err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
2332 if (err) {
2333 err = -EADDRNOTAVAIL;
2334 goto free_lists;
2335 }
2336
2337 status = ice_add_mac(hw, &a_mac_list);
2338 if (status) {
2339 err = -EADDRNOTAVAIL;
2340 goto free_lists;
2341 }
2342
2343free_lists:
2344 /* free list entries */
2345 ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
2346 ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
2347
2348 if (err) {
2349 netdev_err(netdev, "can't set mac %pM. filter update failed\n",
2350 mac);
2351 return err;
2352 }
2353
2354 /* change the netdev's mac address */
2355 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2356 netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
2357 netdev->dev_addr);
2358
2359 /* write new mac address to the firmware */
2360 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2361 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
2362 if (status) {
2363 netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
2364 mac);
2365 }
2366 return 0;
2367}
2368
2369/**
2370 * ice_set_rx_mode - NDO callback to set the netdev filters
2371 * @netdev: network interface device structure
2372 */
2373static void ice_set_rx_mode(struct net_device *netdev)
2374{
2375 struct ice_netdev_priv *np = netdev_priv(netdev);
2376 struct ice_vsi *vsi = np->vsi;
2377
2378 if (!vsi)
2379 return;
2380
2381 /* Set the flags to synchronize filters
2382 * ndo_set_rx_mode may be triggered even without a change in netdev
2383 * flags
2384 */
2385 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
2386 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
2387 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
2388
2389 /* schedule our worker thread which will take care of
2390 * applying the new filter changes
2391 */
2392 ice_service_task_schedule(vsi->back);
2393}
2394
2395/**
2396 * ice_fdb_add - add an entry to the hardware database
2397 * @ndm: the input from the stack
2398 * @tb: pointer to array of nladdr (unused)
2399 * @dev: the net device pointer
2400 * @addr: the MAC address entry being added
2401 * @vid: VLAN id
2402 * @flags: instructions from stack about fdb operation
2403 */
2404static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
2405 struct net_device *dev, const unsigned char *addr,
2406 u16 vid, u16 flags)
2407{
2408 int err;
2409
2410 if (vid) {
2411 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
2412 return -EINVAL;
2413 }
2414 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
2415 netdev_err(dev, "FDB only supports static addresses\n");
2416 return -EINVAL;
2417 }
2418
2419 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
2420 err = dev_uc_add_excl(dev, addr);
2421 else if (is_multicast_ether_addr(addr))
2422 err = dev_mc_add_excl(dev, addr);
2423 else
2424 err = -EINVAL;
2425
2426 /* Only return duplicate errors if NLM_F_EXCL is set */
2427 if (err == -EEXIST && !(flags & NLM_F_EXCL))
2428 err = 0;
2429
2430 return err;
2431}
2432
2433/**
2434 * ice_fdb_del - delete an entry from the hardware database
2435 * @ndm: the input from the stack
2436 * @tb: pointer to array of nladdr (unused)
2437 * @dev: the net device pointer
2438 * @addr: the MAC address entry being removed
2439 * @vid: VLAN id
2440 */
2441static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
2442 struct net_device *dev, const unsigned char *addr,
2443 __always_unused u16 vid)
2444{
2445 int err;
2446
2447 if (ndm->ndm_state & NUD_PERMANENT) {
2448 netdev_err(dev, "FDB only supports static addresses\n");
2449 return -EINVAL;
2450 }
2451
2452 if (is_unicast_ether_addr(addr))
2453 err = dev_uc_del(dev, addr);
2454 else if (is_multicast_ether_addr(addr))
2455 err = dev_mc_del(dev, addr);
2456 else
2457 err = -EINVAL;
2458
2459 return err;
2460}
2461
d76a60ba
AV
2462/**
2463 * ice_set_features - set the netdev feature flags
2464 * @netdev: ptr to the netdev being adjusted
2465 * @features: the feature set that the stack is suggesting
2466 */
2467static int ice_set_features(struct net_device *netdev,
2468 netdev_features_t features)
2469{
2470 struct ice_netdev_priv *np = netdev_priv(netdev);
2471 struct ice_vsi *vsi = np->vsi;
2472 int ret = 0;
2473
492af0ab
MFIP
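	/* Each block below compares a requested feature bit against the one
	 * currently programmed and only reconfigures the VSI on a change.
	 */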
2474 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
2475 ret = ice_vsi_manage_rss_lut(vsi, true);
2476 else if (!(features & NETIF_F_RXHASH) &&
2477 netdev->features & NETIF_F_RXHASH)
2478 ret = ice_vsi_manage_rss_lut(vsi, false);
2479
d76a60ba
AV
2480 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
2481 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
2482 ret = ice_vsi_manage_vlan_stripping(vsi, true);
2483 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
2484 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
2485 ret = ice_vsi_manage_vlan_stripping(vsi, false);
2486 else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
2487 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
2488 ret = ice_vsi_manage_vlan_insertion(vsi);
2489 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
2490 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
2491 ret = ice_vsi_manage_vlan_insertion(vsi);
2492
2493 return ret;
2494}
2495
2496/**
2497 * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
2498 * @vsi: VSI to setup vlan properties for
2499 */
2500static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
2501{
2502 int ret = 0;
2503
2504 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2505 ret = ice_vsi_manage_vlan_stripping(vsi, true);
2506 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
2507 ret = ice_vsi_manage_vlan_insertion(vsi);
2508
2509 return ret;
2510}
2511
2512/**
2513 * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
2514 * @vsi: the VSI being brought back up
2515 */
2516static int ice_restore_vlan(struct ice_vsi *vsi)
2517{
2518 int err;
2519 u16 vid;
2520
2521 if (!vsi->netdev)
2522 return -EINVAL;
2523
2524 err = ice_vsi_vlan_setup(vsi);
2525 if (err)
2526 return err;
2527
2528 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
2529 err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
2530 if (err)
2531 break;
2532 }
2533
2534 return err;
2535}
2536
cdedef59
AV
2537/**
2538 * ice_vsi_cfg - Setup the VSI
2539 * @vsi: the VSI being configured
2540 *
2541 * Return 0 on success and negative value on error
2542 */
2543static int ice_vsi_cfg(struct ice_vsi *vsi)
2544{
2545 int err;
2546
c7f2c42b
AV
2547 if (vsi->netdev) {
2548 ice_set_rx_mode(vsi->netdev);
2549 err = ice_restore_vlan(vsi);
2550 if (err)
2551 return err;
2552 }
d76a60ba 2553
cdedef59
AV
2554 err = ice_vsi_cfg_txqs(vsi);
2555 if (!err)
2556 err = ice_vsi_cfg_rxqs(vsi);
2557
2558 return err;
2559}
2560
2b245cb2
AV
2561/**
2562 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
2563 * @vsi: the VSI being configured
2564 */
2565static void ice_napi_enable_all(struct ice_vsi *vsi)
2566{
2567 int q_idx;
2568
2569 if (!vsi->netdev)
2570 return;
2571
2572 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
2573 napi_enable(&vsi->q_vectors[q_idx]->napi);
2574}
2575
cdedef59
AV
2576/**
2577 * ice_up_complete - Finish the last steps of bringing up a connection
2578 * @vsi: The VSI being configured
2579 *
2580 * Return 0 on success and negative value on error
2581 */
2582static int ice_up_complete(struct ice_vsi *vsi)
2583{
2584 struct ice_pf *pf = vsi->back;
2585 int err;
2586
2587 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
2588 ice_vsi_cfg_msix(vsi);
2589 else
2590 return -ENOTSUPP;
2591
2592 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2593 * Tx queue group list was configured and the context bits were
2594 * programmed using ice_vsi_cfg_txqs
2595 */
2596 err = ice_vsi_start_rx_rings(vsi);
2597 if (err)
2598 return err;
2599
2600 clear_bit(__ICE_DOWN, vsi->state);
2b245cb2 2601 ice_napi_enable_all(vsi);
cdedef59
AV
2602 ice_vsi_ena_irq(vsi);
2603
2604 if (vsi->port_info &&
2605 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
2606 vsi->netdev) {
2607 ice_print_link_msg(vsi, true);
2608 netif_tx_start_all_queues(vsi->netdev);
2609 netif_carrier_on(vsi->netdev);
2610 }
2611
2612 ice_service_task_schedule(pf);
2613
2614 return err;
2615}
2616
fcea6f3d
AV
2617/**
2618 * ice_up - Bring the connection back up after being down
2619 * @vsi: VSI being configured
2620 */
2621int ice_up(struct ice_vsi *vsi)
2622{
2623 int err;
2624
2625 err = ice_vsi_cfg(vsi);
2626 if (!err)
2627 err = ice_up_complete(vsi);
2628
2629 return err;
2630}
2631
2632/**
2633 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
2634 * @ring: Tx or Rx ring to read stats from
2635 * @pkts: packets stats counter
2636 * @bytes: bytes stats counter
2637 *
2638 * This function fetches stats from the ring considering the atomic operations
2639 * that need to be performed to read u64 values on a 32-bit machine.
2640 */
2641static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
2642 u64 *bytes)
2643{
2644 unsigned int start;
2645 *pkts = 0;
2646 *bytes = 0;
2647
2648 if (!ring)
2649 return;
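	/* u64_stats seqcount loop: re-read the counters if the writer side
	 * updated them mid-read (required for consistent u64s on 32-bit).
	 */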
2650 do {
2651 start = u64_stats_fetch_begin_irq(&ring->syncp);
2652 *pkts = ring->stats.pkts;
2653 *bytes = ring->stats.bytes;
2654 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2655}
2656
fcea6f3d
AV
2657/**
2658 * ice_update_vsi_ring_stats - Update VSI stats counters
2659 * @vsi: the VSI to be updated
2660 */
2661static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
2662{
2663 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
2664 struct ice_ring *ring;
2665 u64 pkts, bytes;
2666 int i;
2667
2668 /* reset netdev stats */
2669 vsi_stats->tx_packets = 0;
2670 vsi_stats->tx_bytes = 0;
2671 vsi_stats->rx_packets = 0;
2672 vsi_stats->rx_bytes = 0;
2673
2674 /* reset non-netdev (extended) stats */
2675 vsi->tx_restart = 0;
2676 vsi->tx_busy = 0;
2677 vsi->tx_linearize = 0;
2678 vsi->rx_buf_failed = 0;
2679 vsi->rx_page_failed = 0;
2680
2681 rcu_read_lock();
2682
2683 /* update Tx rings counters */
2684 ice_for_each_txq(vsi, i) {
2685 ring = READ_ONCE(vsi->tx_rings[i]);
2686 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
2687 vsi_stats->tx_packets += pkts;
2688 vsi_stats->tx_bytes += bytes;
2689 vsi->tx_restart += ring->tx_stats.restart_q;
2690 vsi->tx_busy += ring->tx_stats.tx_busy;
2691 vsi->tx_linearize += ring->tx_stats.tx_linearize;
2692 }
2693
2694 /* update Rx rings counters */
2695 ice_for_each_rxq(vsi, i) {
2696 ring = READ_ONCE(vsi->rx_rings[i]);
2697 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
2698 vsi_stats->rx_packets += pkts;
2699 vsi_stats->rx_bytes += bytes;
2700 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
2701 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
2702 }
2703
2704 rcu_read_unlock();
2705}
2706
2707/**
2708 * ice_update_vsi_stats - Update VSI stats counters
2709 * @vsi: the VSI to be updated
2710 */
2711static void ice_update_vsi_stats(struct ice_vsi *vsi)
2712{
2713 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
2714 struct ice_eth_stats *cur_es = &vsi->eth_stats;
2715 struct ice_pf *pf = vsi->back;
2716
2717 if (test_bit(__ICE_DOWN, vsi->state) ||
2718 test_bit(__ICE_CFG_BUSY, pf->state))
2719 return;
2720
2721 /* get stats as recorded by Tx/Rx rings */
2722 ice_update_vsi_ring_stats(vsi);
2723
2724 /* get VSI stats as recorded by the hardware */
2725 ice_update_eth_stats(vsi);
2726
2727 cur_ns->tx_errors = cur_es->tx_errors;
2728 cur_ns->rx_dropped = cur_es->rx_discards;
2729 cur_ns->tx_dropped = cur_es->tx_discards;
2730 cur_ns->multicast = cur_es->rx_multicast;
2731
2732 /* update some more netdev stats if this is main VSI */
2733 if (vsi->type == ICE_VSI_PF) {
2734 cur_ns->rx_crc_errors = pf->stats.crc_errors;
2735 cur_ns->rx_errors = pf->stats.crc_errors +
2736 pf->stats.illegal_bytes;
2737 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
2738 }
2739}
2740
2741/**
2742 * ice_update_pf_stats - Update PF port stats counters
2743 * @pf: PF whose stats needs to be updated
2744 */
2745static void ice_update_pf_stats(struct ice_pf *pf)
2746{
2747 struct ice_hw_port_stats *prev_ps, *cur_ps;
2748 struct ice_hw *hw = &pf->hw;
2749 u8 pf_id;
2750
2751 prev_ps = &pf->stats_prev;
2752 cur_ps = &pf->stats;
2753 pf_id = hw->pf_id;
2754
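	/* Each ice_stat_update* helper below reads the HW counter and folds
	 * the delta since the previous snapshot (prev_ps) into the running
	 * total (cur_ps), handling counter wraparound.
	 */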
2755 ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
2756 pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
2757 &cur_ps->eth.rx_bytes);
2758
2759 ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
2760 pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
2761 &cur_ps->eth.rx_unicast);
2762
2763 ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
2764 pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
2765 &cur_ps->eth.rx_multicast);
2766
2767 ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
2768 pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
2769 &cur_ps->eth.rx_broadcast);
2770
2771 ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
2772 pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
2773 &cur_ps->eth.tx_bytes);
2774
2775 ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
2776 pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
2777 &cur_ps->eth.tx_unicast);
2778
2779 ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
2780 pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
2781 &cur_ps->eth.tx_multicast);
2782
2783 ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
2784 pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
2785 &cur_ps->eth.tx_broadcast);
2786
2787 ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
2788 &prev_ps->tx_dropped_link_down,
2789 &cur_ps->tx_dropped_link_down);
2790
2791 ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
2792 pf->stat_prev_loaded, &prev_ps->rx_size_64,
2793 &cur_ps->rx_size_64);
2794
2795 ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
2796 pf->stat_prev_loaded, &prev_ps->rx_size_127,
2797 &cur_ps->rx_size_127);
2798
2799 ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
2800 pf->stat_prev_loaded, &prev_ps->rx_size_255,
2801 &cur_ps->rx_size_255);
2802
2803 ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
2804 pf->stat_prev_loaded, &prev_ps->rx_size_511,
2805 &cur_ps->rx_size_511);
2806
2807 ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
2808 GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
2809 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
2810
2811 ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
2812 GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
2813 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
2814
2815 ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
2816 GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
2817 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
2818
2819 ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
2820 pf->stat_prev_loaded, &prev_ps->tx_size_64,
2821 &cur_ps->tx_size_64);
2822
2823 ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
2824 pf->stat_prev_loaded, &prev_ps->tx_size_127,
2825 &cur_ps->tx_size_127);
2826
2827 ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
2828 pf->stat_prev_loaded, &prev_ps->tx_size_255,
2829 &cur_ps->tx_size_255);
2830
2831 ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
2832 pf->stat_prev_loaded, &prev_ps->tx_size_511,
2833 &cur_ps->tx_size_511);
2834
2835 ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
2836 GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
2837 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
2838
2839 ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
2840 GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
2841 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
2842
2843 ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
2844 GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
2845 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
2846
2847 ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
2848 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
2849
2850 ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
2851 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
2852
2853 ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
2854 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
2855
2856 ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
2857 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
2858
2859 ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
2860 &prev_ps->crc_errors, &cur_ps->crc_errors);
2861
2862 ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
2863 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
2864
2865 ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
2866 &prev_ps->mac_local_faults,
2867 &cur_ps->mac_local_faults);
2868
2869 ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
2870 &prev_ps->mac_remote_faults,
2871 &cur_ps->mac_remote_faults);
2872
2873 ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
2874 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
2875
2876 ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
2877 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
2878
2879 ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
2880 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
2881
2882 ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
2883 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
2884
2885 ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
2886 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
2887
2888 pf->stat_prev_loaded = true;
2889}
2890
2891/**
2892 * ice_get_stats64 - get statistics for network device structure
2893 * @netdev: network interface device structure
2894 * @stats: main device statistics structure
2895 */
2896static
2897void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2898{
2899 struct ice_netdev_priv *np = netdev_priv(netdev);
2900 struct rtnl_link_stats64 *vsi_stats;
2901 struct ice_vsi *vsi = np->vsi;
2902
2903 vsi_stats = &vsi->net_stats;
2904
2905 if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
2906 return;
2907 /* netdev packet/byte stats come from ring counters. These are obtained
2908 * by summing up ring counters (done by ice_update_vsi_ring_stats).
2909 */
2910 ice_update_vsi_ring_stats(vsi);
2911 stats->tx_packets = vsi_stats->tx_packets;
2912 stats->tx_bytes = vsi_stats->tx_bytes;
2913 stats->rx_packets = vsi_stats->rx_packets;
2914 stats->rx_bytes = vsi_stats->rx_bytes;
2915
2916 /* The rest of the stats can be read from the hardware but instead we
2917 * just return values that the watchdog task has already obtained from
2918 * the hardware.
2919 */
2920 stats->multicast = vsi_stats->multicast;
2921 stats->tx_errors = vsi_stats->tx_errors;
2922 stats->tx_dropped = vsi_stats->tx_dropped;
2923 stats->rx_errors = vsi_stats->rx_errors;
2924 stats->rx_dropped = vsi_stats->rx_dropped;
2925 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
2926 stats->rx_length_errors = vsi_stats->rx_length_errors;
2927}
2928
2b245cb2
AV
2929/**
2930 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
2931 * @vsi: VSI having NAPI disabled
2932 */
2933static void ice_napi_disable_all(struct ice_vsi *vsi)
2934{
2935 int q_idx;
2936
2937 if (!vsi->netdev)
2938 return;
2939
2940 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
2941 napi_disable(&vsi->q_vectors[q_idx]->napi);
2942}
2943
cdedef59
AV
2944/**
2945 * ice_down - Shutdown the connection
2946 * @vsi: The VSI being stopped
2947 */
fcea6f3d 2948int ice_down(struct ice_vsi *vsi)
cdedef59 2949{
72adf242 2950 int i, tx_err, rx_err;
cdedef59
AV
2951
2952 /* Caller of this function is expected to set the
2953 * vsi->state __ICE_DOWN bit
2954 */
2955 if (vsi->netdev) {
2956 netif_carrier_off(vsi->netdev);
2957 netif_tx_disable(vsi->netdev);
2958 }
2959
2960 ice_vsi_dis_irq(vsi);
ddf30f7f 2961 tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);
72adf242
AV
2962 if (tx_err)
2963 netdev_err(vsi->netdev,
2964 "Failed stop Tx rings, VSI %d error %d\n",
2965 vsi->vsi_num, tx_err);
2966
2967 rx_err = ice_vsi_stop_rx_rings(vsi);
2968 if (rx_err)
2969 netdev_err(vsi->netdev,
2970 "Failed stop Rx rings, VSI %d error %d\n",
2971 vsi->vsi_num, rx_err);
2972
2b245cb2 2973 ice_napi_disable_all(vsi);
cdedef59
AV
2974
2975 ice_for_each_txq(vsi, i)
2976 ice_clean_tx_ring(vsi->tx_rings[i]);
2977
2978 ice_for_each_rxq(vsi, i)
2979 ice_clean_rx_ring(vsi->rx_rings[i]);
2980
72adf242
AV
2981 if (tx_err || rx_err) {
2982 netdev_err(vsi->netdev,
2983 "Failed to close VSI 0x%04X on switch 0x%04X\n",
cdedef59 2984 vsi->vsi_num, vsi->vsw->sw_id);
72adf242
AV
2985 return -EIO;
2986 }
2987
2988 return 0;
cdedef59
AV
2989}
2990
2991/**
2992 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
2993 * @vsi: VSI having resources allocated
2994 *
2995 * Return 0 on success, negative on failure
2996 */
2997static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
2998{
dab0588f 2999 int i, err = 0;
cdedef59
AV
3000
3001 if (!vsi->num_txq) {
3002 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
3003 vsi->vsi_num);
3004 return -EINVAL;
3005 }
3006
3007 ice_for_each_txq(vsi, i) {
72adf242 3008 vsi->tx_rings[i]->netdev = vsi->netdev;
cdedef59
AV
3009 err = ice_setup_tx_ring(vsi->tx_rings[i]);
3010 if (err)
3011 break;
3012 }
3013
3014 return err;
3015}
3016
3017/**
3018 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
3019 * @vsi: VSI having resources allocated
3020 *
3021 * Return 0 on success, negative on failure
3022 */
3023static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
3024{
dab0588f 3025 int i, err = 0;
cdedef59
AV
3026
3027 if (!vsi->num_rxq) {
3028 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
3029 vsi->vsi_num);
3030 return -EINVAL;
3031 }
3032
3033 ice_for_each_rxq(vsi, i) {
72adf242 3034 vsi->rx_rings[i]->netdev = vsi->netdev;
cdedef59
AV
3035 err = ice_setup_rx_ring(vsi->rx_rings[i]);
3036 if (err)
3037 break;
3038 }
3039
3040 return err;
3041}
3042
3043/**
3044 * ice_vsi_req_irq - Request IRQ from the OS
3045 * @vsi: The VSI IRQ is being requested for
3046 * @basename: name for the vector
3047 *
3048 * Return 0 on success and a negative value on error
3049 */
3050static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
3051{
3052 struct ice_pf *pf = vsi->back;
3053 int err = -EINVAL;
3054
3055 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3056 err = ice_vsi_req_irq_msix(vsi, basename);
3057
3058 return err;
3059}
3060
cdedef59
AV
3061/**
3062 * ice_vsi_open - Called when a network interface is made active
3063 * @vsi: the VSI to open
3064 *
3065 * Initialization of the VSI
3066 *
3067 * Returns 0 on success, negative value on error
3068 */
3069static int ice_vsi_open(struct ice_vsi *vsi)
3070{
3071 char int_name[ICE_INT_NAME_STR_LEN];
3072 struct ice_pf *pf = vsi->back;
3073 int err;
3074
3075 /* allocate descriptors */
3076 err = ice_vsi_setup_tx_rings(vsi);
3077 if (err)
3078 goto err_setup_tx;
3079
3080 err = ice_vsi_setup_rx_rings(vsi);
3081 if (err)
3082 goto err_setup_rx;
3083
3084 err = ice_vsi_cfg(vsi);
3085 if (err)
3086 goto err_setup_rx;
3087
3088 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3089 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
3090 err = ice_vsi_req_irq(vsi, int_name);
3091 if (err)
3092 goto err_setup_rx;
3093
3094 /* Notify the stack of the actual queue counts. */
3095 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
3096 if (err)
3097 goto err_set_qs;
3098
3099 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
3100 if (err)
3101 goto err_set_qs;
3102
3103 err = ice_up_complete(vsi);
3104 if (err)
3105 goto err_up_complete;
3106
3107 return 0;
3108
3109err_up_complete:
3110 ice_down(vsi);
3111err_set_qs:
3112 ice_vsi_free_irq(vsi);
3113err_setup_rx:
3114 ice_vsi_free_rx_rings(vsi);
3115err_setup_tx:
3116 ice_vsi_free_tx_rings(vsi);
3117
3118 return err;
3119}
3120
0f9d5027
AV
3121/**
3122 * ice_vsi_release_all - Delete all VSIs
3123 * @pf: PF from which all VSIs are being removed
3124 */
3125static void ice_vsi_release_all(struct ice_pf *pf)
3126{
3127 int err, i;
3128
3129 if (!pf->vsi)
3130 return;
3131
3132 for (i = 0; i < pf->num_alloc_vsi; i++) {
3133 if (!pf->vsi[i])
3134 continue;
3135
3136 err = ice_vsi_release(pf->vsi[i]);
3137 if (err)
3138 dev_dbg(&pf->pdev->dev,
3139 "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
3140 i, err, pf->vsi[i]->vsi_num);
3141 }
3142}
3143
0b28b702
AV
3144/**
3145 * ice_dis_vsi - pause a VSI
3146 * @vsi: the VSI being paused
3147 */
3148static void ice_dis_vsi(struct ice_vsi *vsi)
3149{
3150 if (test_bit(__ICE_DOWN, vsi->state))
3151 return;
3152
3153 set_bit(__ICE_NEEDS_RESTART, vsi->state);
3154
124cd547
DE
3155 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
3156 if (netif_running(vsi->netdev)) {
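			/* ndo_stop() expects RTNL to be held, so take it here
			 * since we are invoking the op directly
			 */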
3157 rtnl_lock();
3158 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3159 rtnl_unlock();
3160 } else {
3161 ice_vsi_close(vsi);
3162 }
0f9d5027 3163 }
0b28b702
AV
3164}
3165
3166/**
3167 * ice_ena_vsi - resume a VSI
3168 * @vsi: the VSI being resume
3169 */
0f9d5027 3170static int ice_ena_vsi(struct ice_vsi *vsi)
0b28b702 3171{
0f9d5027
AV
3172 int err = 0;
3173
124cd547
DE
3174 if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
3175 vsi->netdev) {
3176 if (netif_running(vsi->netdev)) {
0f9d5027
AV
3177 rtnl_lock();
3178 err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3179 rtnl_unlock();
124cd547
DE
3180 } else {
3181 err = ice_vsi_open(vsi);
0f9d5027 3182 }
124cd547 3183 }
0b28b702 3184
0f9d5027 3185 return err;
0b28b702
AV
3186}
3187
3188/**
3189 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
3190 * @pf: the PF
3191 */
3192static void ice_pf_dis_all_vsi(struct ice_pf *pf)
3193{
3194 int v;
3195
3196 ice_for_each_vsi(pf, v)
3197 if (pf->vsi[v])
3198 ice_dis_vsi(pf->vsi[v]);
3199}
3200
3201/**
3202 * ice_pf_ena_all_vsi - Resume all VSIs on a PF
3203 * @pf: the PF
3204 */
0f9d5027 3205static int ice_pf_ena_all_vsi(struct ice_pf *pf)
0b28b702
AV
3206{
3207 int v;
3208
3209 ice_for_each_vsi(pf, v)
3210 if (pf->vsi[v])
0f9d5027
AV
3211 if (ice_ena_vsi(pf->vsi[v]))
3212 return -EIO;
3213
3214 return 0;
3215}
3216
3217/**
3218 * ice_vsi_rebuild_all - rebuild all VSIs in pf
3219 * @pf: the PF
3220 */
3221static int ice_vsi_rebuild_all(struct ice_pf *pf)
3222{
3223 int i;
3224
3225 /* loop through pf->vsi array and reinit the VSI if found */
3226 for (i = 0; i < pf->num_alloc_vsi; i++) {
3227 int err;
3228
3229 if (!pf->vsi[i])
3230 continue;
3231
007676b4
AV
3232 /* VF VSI rebuild isn't supported yet */
3233 if (pf->vsi[i]->type == ICE_VSI_VF)
3234 continue;
3235
0f9d5027
AV
3236 err = ice_vsi_rebuild(pf->vsi[i]);
3237 if (err) {
3238 dev_err(&pf->pdev->dev,
3239 "VSI at index %d rebuild failed\n",
3240 pf->vsi[i]->idx);
3241 return err;
3242 }
3243
3244 dev_info(&pf->pdev->dev,
3245 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
3246 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
3247 }
3248
3249 return 0;
0b28b702
AV
3250}
3251
334cb062
AV
3252/**
3253 * ice_vsi_replay_all - replay all VSIs configuration in the PF
3254 * @pf: the PF
3255 */
3256static int ice_vsi_replay_all(struct ice_pf *pf)
3257{
3258 struct ice_hw *hw = &pf->hw;
3259 enum ice_status ret;
3260 int i;
3261
3262 /* loop through pf->vsi array and replay the VSI if found */
3263 for (i = 0; i < pf->num_alloc_vsi; i++) {
3264 if (!pf->vsi[i])
3265 continue;
3266
3267 ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
3268 if (ret) {
3269 dev_err(&pf->pdev->dev,
3270 "VSI at index %d replay failed %d\n",
3271 pf->vsi[i]->idx, ret);
3272 return -EIO;
3273 }
3274
3275 /* Re-map HW VSI number, using VSI handle that has been
3276 * previously validated in ice_replay_vsi() call above
3277 */
3278 pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
3279
3280 dev_info(&pf->pdev->dev,
3281 "VSI at index %d filter replayed successfully - vsi_num %i\n",
3282 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
3283 }
3284
3285 /* Clean up replay filter after successful re-configuration */
3286 ice_replay_post(hw);
3287 return 0;
3288}
3289
0b28b702
AV
3290/**
3291 * ice_rebuild - rebuild after reset
3292 * @pf: pf to rebuild
3293 */
3294static void ice_rebuild(struct ice_pf *pf)
3295{
3296 struct device *dev = &pf->pdev->dev;
3297 struct ice_hw *hw = &pf->hw;
3298 enum ice_status ret;
ce317dd9 3299 int err, i;
0b28b702
AV
3300
3301 if (test_bit(__ICE_DOWN, pf->state))
3302 goto clear_recovery;
3303
3304 dev_dbg(dev, "rebuilding pf\n");
3305
3306 ret = ice_init_all_ctrlq(hw);
3307 if (ret) {
3308 dev_err(dev, "control queues init failed %d\n", ret);
0f9d5027 3309 goto err_init_ctrlq;
0b28b702
AV
3310 }
3311
3312 ret = ice_clear_pf_cfg(hw);
3313 if (ret) {
3314 dev_err(dev, "clear PF configuration failed %d\n", ret);
0f9d5027 3315 goto err_init_ctrlq;
0b28b702
AV
3316 }
3317
3318 ice_clear_pxe_mode(hw);
3319
3320 ret = ice_get_caps(hw);
3321 if (ret) {
3322 dev_err(dev, "ice_get_caps failed %d\n", ret);
0f9d5027 3323 goto err_init_ctrlq;
0b28b702
AV
3324 }
3325
0f9d5027
AV
3326 err = ice_sched_init_port(hw->port_info);
3327 if (err)
3328 goto err_sched_init_port;
3329
eb0208ec
PB
3330 /* reset search_hint of irq_trackers to 0 since interrupts are
3331 * reclaimed and could be allocated from the beginning during VSI rebuild
3332 */
3333 pf->sw_irq_tracker->search_hint = 0;
3334 pf->hw_irq_tracker->search_hint = 0;
3335
0f9d5027 3336 err = ice_vsi_rebuild_all(pf);
0b28b702 3337 if (err) {
0f9d5027
AV
3338 dev_err(dev, "ice_vsi_rebuild_all failed\n");
3339 goto err_vsi_rebuild;
3340 }
3341
5755143d
DE
3342 err = ice_update_link_info(hw->port_info);
3343 if (err)
3344 dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
3345
334cb062
AV
3346 /* Replay all VSI configurations, including filters, after reset */
3347 if (ice_vsi_replay_all(pf)) {
0f9d5027 3348 dev_err(&pf->pdev->dev,
334cb062 3349 "error replaying VSI configurations with switch filter rules\n");
0f9d5027 3350 goto err_vsi_rebuild;
0b28b702
AV
3351 }
3352
3353 /* start misc vector */
3354 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
3355 err = ice_req_irq_msix_misc(pf);
3356 if (err) {
3357 dev_err(dev, "misc vector setup failed: %d\n", err);
0f9d5027 3358 goto err_vsi_rebuild;
0b28b702
AV
3359 }
3360 }
3361
3362 /* restart the VSIs that were rebuilt and running before the reset */
0f9d5027
AV
3363 err = ice_pf_ena_all_vsi(pf);
3364 if (err) {
3365 dev_err(&pf->pdev->dev, "error enabling VSIs\n");
3366 /* no need to disable VSIs in tear down path in ice_rebuild()
3367 * since it's already taken care of in ice_vsi_open()
3368 */
3369 goto err_vsi_rebuild;
3370 }
0b28b702 3371
ddf30f7f 3372 ice_reset_all_vfs(pf, true);
ce317dd9
AV
3373
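	/* Sync netdev carrier state and Tx queue state with the current link
	 * status of each PF VSI now that the rebuild is complete
	 */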
3374 for (i = 0; i < pf->num_alloc_vsi; i++) {
3375 bool link_up;
3376
3377 if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
3378 continue;
3379 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
3380 if (link_up) {
3381 netif_carrier_on(pf->vsi[i]->netdev);
3382 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
3383 } else {
3384 netif_carrier_off(pf->vsi[i]->netdev);
3385 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
3386 }
3387 }
3388
0f9d5027
AV
3389 /* if we get here, reset flow is successful */
3390 clear_bit(__ICE_RESET_FAILED, pf->state);
0b28b702
AV
3391 return;
3392
0f9d5027
AV
3393err_vsi_rebuild:
3394 ice_vsi_release_all(pf);
3395err_sched_init_port:
3396 ice_sched_cleanup_all(hw);
3397err_init_ctrlq:
0b28b702
AV
3398 ice_shutdown_all_ctrlq(hw);
3399 set_bit(__ICE_RESET_FAILED, pf->state);
3400clear_recovery:
0f9d5027
AV
3401 /* set this bit in PF state to control service task scheduling */
3402 set_bit(__ICE_NEEDS_RESTART, pf->state);
3403 dev_err(dev, "Rebuild failed, unload and reload driver\n");
0b28b702
AV
3404}
3405
e94d4478
AV
3406/**
3407 * ice_change_mtu - NDO callback to change the MTU
3408 * @netdev: network interface device structure
3409 * @new_mtu: new value for maximum frame size
3410 *
3411 * Returns 0 on success, negative on failure
3412 */
3413static int ice_change_mtu(struct net_device *netdev, int new_mtu)
3414{
3415 struct ice_netdev_priv *np = netdev_priv(netdev);
3416 struct ice_vsi *vsi = np->vsi;
3417 struct ice_pf *pf = vsi->back;
3418 u8 count = 0;
3419
3420 if (new_mtu == netdev->mtu) {
3968540b 3421 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
e94d4478
AV
3422 return 0;
3423 }
3424
3425 if (new_mtu < netdev->min_mtu) {
3426 netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
3427 netdev->min_mtu);
3428 return -EINVAL;
3429 } else if (new_mtu > netdev->max_mtu) {
3430 netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
3431 netdev->max_mtu);
3432 return -EINVAL;
3433 }
3434 /* if a reset is in progress, wait for some time for it to complete */
3435 do {
5df7e45d 3436 if (ice_is_reset_in_progress(pf->state)) {
e94d4478
AV
3437 count++;
3438 usleep_range(1000, 2000);
3439 } else {
3440 break;
3441 }
3442
3443 } while (count < 100);
3444
3445 if (count == 100) {
3446 netdev_err(netdev, "can't change mtu. Device is busy\n");
3447 return -EBUSY;
3448 }
3449
3450 netdev->mtu = new_mtu;
3451
3452 /* if VSI is up, bring it down and then back up */
3453 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
3454 int err;
3455
3456 err = ice_down(vsi);
3457 if (err) {
3458 netdev_err(netdev, "change mtu if_up err %d\n", err);
3459 return err;
3460 }
3461
3462 err = ice_up(vsi);
3463 if (err) {
3464 netdev_err(netdev, "change mtu if_up err %d\n", err);
3465 return err;
3466 }
3467 }
3468
3469 netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
3470 return 0;
3471}
3472
d76a60ba
AV
3473/**
3474 * ice_set_rss - Set RSS keys and lut
3475 * @vsi: Pointer to VSI structure
3476 * @seed: RSS hash seed
3477 * @lut: Lookup table
3478 * @lut_size: Lookup table size
3479 *
3480 * Returns 0 on success, negative on failure
3481 */
3482int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
3483{
3484 struct ice_pf *pf = vsi->back;
3485 struct ice_hw *hw = &pf->hw;
3486 enum ice_status status;
3487
3488 if (seed) {
3489 struct ice_aqc_get_set_rss_keys *buf =
3490 (struct ice_aqc_get_set_rss_keys *)seed;
3491
4fb33f31 3492 status = ice_aq_set_rss_key(hw, vsi->idx, buf);
d76a60ba
AV
3493
3494 if (status) {
3495 dev_err(&pf->pdev->dev,
3496 "Cannot set RSS key, err %d aq_err %d\n",
3497 status, hw->adminq.rq_last_status);
3498 return -EIO;
3499 }
3500 }
3501
3502 if (lut) {
4fb33f31
AV
3503 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
3504 lut, lut_size);
d76a60ba
AV
3505 if (status) {
3506 dev_err(&pf->pdev->dev,
3507 "Cannot set RSS lut, err %d aq_err %d\n",
3508 status, hw->adminq.rq_last_status);
3509 return -EIO;
3510 }
3511 }
3512
3513 return 0;
3514}
3515
3516/**
3517 * ice_get_rss - Get RSS keys and lut
3518 * @vsi: Pointer to VSI structure
3519 * @seed: Buffer to store the keys
3520 * @lut: Buffer to store the lookup table entries
3521 * @lut_size: Size of buffer to store the lookup table entries
3522 *
3523 * Returns 0 on success, negative on failure
3524 */
3525int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
3526{
3527 struct ice_pf *pf = vsi->back;
3528 struct ice_hw *hw = &pf->hw;
3529 enum ice_status status;
3530
3531 if (seed) {
3532 struct ice_aqc_get_set_rss_keys *buf =
3533 (struct ice_aqc_get_set_rss_keys *)seed;
3534
4fb33f31 3535 status = ice_aq_get_rss_key(hw, vsi->idx, buf);
d76a60ba
AV
3536 if (status) {
3537 dev_err(&pf->pdev->dev,
3538 "Cannot get RSS key, err %d aq_err %d\n",
3539 status, hw->adminq.rq_last_status);
3540 return -EIO;
3541 }
3542 }
3543
3544 if (lut) {
4fb33f31
AV
3545 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
3546 lut, lut_size);
d76a60ba
AV
3547 if (status) {
3548 dev_err(&pf->pdev->dev,
3549 "Cannot get RSS lut, err %d aq_err %d\n",
3550 status, hw->adminq.rq_last_status);
3551 return -EIO;
3552 }
3553 }
3554
3555 return 0;
3556}
3557
b1edc14a
MFIP
3558/**
3559 * ice_bridge_getlink - Get the hardware bridge mode
3560 * @skb: skb buff
3561 * @pid: process id
3562 * @seq: RTNL message seq
3563 * @dev: the netdev being configured
3564 * @filter_mask: filter mask passed in
3565 * @nlflags: netlink flags passed in
3566 *
3567 * Return the bridge mode (VEB/VEPA)
3568 */
3569static int
3570ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3571 struct net_device *dev, u32 filter_mask, int nlflags)
3572{
3573 struct ice_netdev_priv *np = netdev_priv(dev);
3574 struct ice_vsi *vsi = np->vsi;
3575 struct ice_pf *pf = vsi->back;
3576 u16 bmode;
3577
3578 bmode = pf->first_sw->bridge_mode;
3579
3580 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
3581 filter_mask, NULL);
3582}
3583
3584/**
3585 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
3586 * @vsi: Pointer to VSI structure
3587 * @bmode: Hardware bridge mode (VEB/VEPA)
3588 *
3589 * Returns 0 on success, negative on failure
3590 */
3591static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
3592{
3593 struct device *dev = &vsi->back->pdev->dev;
3594 struct ice_aqc_vsi_props *vsi_props;
3595 struct ice_hw *hw = &vsi->back->hw;
3596 struct ice_vsi_ctx ctxt = { 0 };
3597 enum ice_status status;
3598
3599 vsi_props = &vsi->info;
3600 ctxt.info = vsi->info;
3601
3602 if (bmode == BRIDGE_MODE_VEB)
3603 /* change from VEPA to VEB mode */
3604 ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
3605 else
3606 /* change from VEB to VEPA mode */
3607 ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
b1edc14a 3608 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
5726ca0e
AV
3609
3610 status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
b1edc14a
MFIP
3611 if (status) {
3612 dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
3613 bmode, status, hw->adminq.sq_last_status);
3614 return -EIO;
3615 }
3616 /* Update sw flags for book keeping */
3617 vsi_props->sw_flags = ctxt.info.sw_flags;
3618
3619 return 0;
3620}
3621
3622/**
3623 * ice_bridge_setlink - Set the hardware bridge mode
3624 * @dev: the netdev being configured
3625 * @nlh: RTNL message
3626 * @flags: bridge setlink flags
3627 *
3628 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
3629 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
3630 * not already set) for all VSIs connected to this switch, and also updates the
3631 * unicast switch filter rules for the corresponding switch of the netdev.
3632 */
3633static int
3634ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
3635 u16 __always_unused flags)
3636{
3637 struct ice_netdev_priv *np = netdev_priv(dev);
3638 struct ice_pf *pf = np->vsi->back;
3639 struct nlattr *attr, *br_spec;
3640 struct ice_hw *hw = &pf->hw;
3641 enum ice_status status;
3642 struct ice_sw *pf_sw;
3643 int rem, v, err = 0;
3644
3645 pf_sw = pf->first_sw;
3646 /* find the attribute in the netlink message */
3647 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3648
3649 nla_for_each_nested(attr, br_spec, rem) {
3650 __u16 mode;
3651
3652 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3653 continue;
3654 mode = nla_get_u16(attr);
3655 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3656 return -EINVAL;
3657 /* Continue if bridge mode is not being flipped */
3658 if (mode == pf_sw->bridge_mode)
3659 continue;
3660 /* Iterates through the PF VSI list and update the loopback
3661 * mode of the VSI
3662 */
3663 ice_for_each_vsi(pf, v) {
3664 if (!pf->vsi[v])
3665 continue;
3666 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
3667 if (err)
3668 return err;
3669 }
3670
3671 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
3672 /* Update the unicast switch filter rules for the corresponding
3673 * switch of the netdev
3674 */
3675 status = ice_update_sw_rule_bridge_mode(hw);
3676 if (status) {
3677 netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
3678 mode, status, hw->adminq.sq_last_status);
3679 /* revert hw->evb_veb */
3680 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
3681 return -EIO;
3682 }
3683
3684 pf_sw->bridge_mode = mode;
3685 }
3686
3687 return 0;
3688}
3689
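One defensive note: nlmsg_find_attr() returns NULL when the message carries no IFLA_AF_SPEC attribute, and nla_for_each_nested() cannot cope with a NULL head. If that case is reachable here, a guard along these lines (a sketch, not present in the listing above) would fail cleanly instead:

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

From user space this callback is normally exercised through iproute2, e.g. "bridge link set dev <pf-netdev> hwmode veb" (or vepa), which is what ends up in the IFLA_BRIDGE_MODE attribute parsed above.
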
b3969fd7
SM
3690/**
3691 * ice_tx_timeout - Respond to a Tx Hang
3692 * @netdev: network interface device structure
3693 */
3694static void ice_tx_timeout(struct net_device *netdev)
3695{
3696 struct ice_netdev_priv *np = netdev_priv(netdev);
3697 struct ice_ring *tx_ring = NULL;
3698 struct ice_vsi *vsi = np->vsi;
3699 struct ice_pf *pf = vsi->back;
3700 u32 head, val = 0, i;
3701 int hung_queue = -1;
3702
3703 pf->tx_timeout_count++;
3704
3705 /* find the stopped queue the same way the stack does */
3706 for (i = 0; i < netdev->num_tx_queues; i++) {
3707 struct netdev_queue *q;
3708 unsigned long trans_start;
3709
3710 q = netdev_get_tx_queue(netdev, i);
3711 trans_start = q->trans_start;
3712 if (netif_xmit_stopped(q) &&
3713 time_after(jiffies,
3714 (trans_start + netdev->watchdog_timeo))) {
3715 hung_queue = i;
3716 break;
3717 }
3718 }
3719
3720 if (i == netdev->num_tx_queues) {
3721 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
3722 } else {
3723 /* now that we have an index, find the tx_ring struct */
3724 for (i = 0; i < vsi->num_txq; i++) {
3725 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
3726 if (hung_queue ==
3727 vsi->tx_rings[i]->q_index) {
3728 tx_ring = vsi->tx_rings[i];
3729 break;
3730 }
3731 }
3732 }
3733 }
3734
3735 /* Reset recovery level if enough time has elapsed after last timeout.
3736 * Also ensure no new reset action happens before next timeout period.
3737 */
3738 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
3739 pf->tx_timeout_recovery_level = 1;
3740 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
3741 netdev->watchdog_timeo)))
3742 return;
3743
3744 if (tx_ring) {
3745 head = tx_ring->next_to_clean;
3746 /* Read interrupt register */
3747 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3748 val = rd32(&pf->hw,
3749 GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
eb0208ec 3750 tx_ring->vsi->hw_base_vector));
b3969fd7
SM
3751
3752 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
3753 vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
3754 head, tx_ring->next_to_use,
3755 readl(tx_ring->tail), val);
3756 }
3757
3758 pf->tx_timeout_last_recovery = jiffies;
3759 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
3760 pf->tx_timeout_recovery_level, hung_queue);
3761
3762 switch (pf->tx_timeout_recovery_level) {
3763 case 1:
3764 set_bit(__ICE_PFR_REQ, pf->state);
3765 break;
3766 case 2:
3767 set_bit(__ICE_CORER_REQ, pf->state);
3768 break;
3769 case 3:
3770 set_bit(__ICE_GLOBR_REQ, pf->state);
3771 break;
3772 default:
3773 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
3774 set_bit(__ICE_DOWN, pf->state);
3775 set_bit(__ICE_NEEDS_RESTART, vsi->state);
8d81fa55 3776 set_bit(__ICE_SERVICE_DIS, pf->state);
b3969fd7
SM
3777 break;
3778 }
3779
3780 ice_service_task_schedule(pf);
3781 pf->tx_timeout_recovery_level++;
3782}
3783
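Each consecutive unrecovered timeout escalates the requested reset scope. The comment below is a compact restatement of the switch statement above; the reset-scope descriptions follow the usual ice naming and are stated here for orientation rather than taken from this file:

/* Tx timeout recovery ladder (one step per unrecovered timeout):
 *   level 1  -> __ICE_PFR_REQ    request a PF reset (this PF only)
 *   level 2  -> __ICE_CORER_REQ  request a core reset (device core)
 *   level 3  -> __ICE_GLOBR_REQ  request a global reset (whole adapter)
 *   level 4+ -> give up: __ICE_DOWN, __ICE_NEEDS_RESTART, __ICE_SERVICE_DIS
 * The service task scheduled at the end of ice_tx_timeout() acts on the
 * requested reset bit.
 */
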
cdedef59
AV
3784/**
3785 * ice_open - Called when a network interface becomes active
3786 * @netdev: network interface device structure
3787 *
3788 * The open entry point is called when a network interface is made
3789 * active by the system (IFF_UP). At this point all resources needed
3790 * for transmit and receive operations are allocated, the interrupt
3791 * handler is registered with the OS, the netdev watchdog is enabled,
3792 * and the stack is notified that the interface is ready.
3793 *
3794 * Returns 0 on success, negative value on failure
3795 */
3796static int ice_open(struct net_device *netdev)
3797{
3798 struct ice_netdev_priv *np = netdev_priv(netdev);
3799 struct ice_vsi *vsi = np->vsi;
3800 int err;
3801
0f9d5027
AV
3802 if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
3803 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
3804 return -EIO;
3805 }
3806
cdedef59
AV
3807 netif_carrier_off(netdev);
3808
3809 err = ice_vsi_open(vsi);
3810
3811 if (err)
3812 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
3813 vsi->vsi_num, vsi->vsw->sw_id);
3814 return err;
3815}
3816
3817/**
3818 * ice_stop - Disables a network interface
3819 * @netdev: network interface device structure
3820 *
3821 * The stop entry point is called when an interface is de-activated by the OS,
3822 * and the netdevice enters the DOWN state. The hardware is still under the
3823 * driver's control, but the netdev interface is disabled.
3824 *
3825 * Returns success only - not allowed to fail
3826 */
3827static int ice_stop(struct net_device *netdev)
3828{
3829 struct ice_netdev_priv *np = netdev_priv(netdev);
3830 struct ice_vsi *vsi = np->vsi;
3831
3832 ice_vsi_close(vsi);
3833
3834 return 0;
3835}
3836
e94d4478
AV
3837/**
3838 * ice_features_check - Validate encapsulated packet conforms to limits
3839 * @skb: skb buffer
3840 * @netdev: This port's netdev
3841 * @features: Offload features that the stack believes apply
3842 */
3843static netdev_features_t
3844ice_features_check(struct sk_buff *skb,
3845 struct net_device __always_unused *netdev,
3846 netdev_features_t features)
3847{
3848 size_t len;
3849
 3850 	/* No point in doing any of this if neither checksum nor GSO is
3851 * being requested for this frame. We can rule out both by just
3852 * checking for CHECKSUM_PARTIAL
3853 */
3854 if (skb->ip_summed != CHECKSUM_PARTIAL)
3855 return features;
3856
3857 /* We cannot support GSO if the MSS is going to be less than
3858 * 64 bytes. If it is then we need to drop support for GSO.
3859 */
3860 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3861 features &= ~NETIF_F_GSO_MASK;
3862
3863 len = skb_network_header(skb) - skb->data;
3864 if (len & ~(ICE_TXD_MACLEN_MAX))
3865 goto out_rm_features;
3866
3867 len = skb_transport_header(skb) - skb_network_header(skb);
3868 if (len & ~(ICE_TXD_IPLEN_MAX))
3869 goto out_rm_features;
3870
3871 if (skb->encapsulation) {
3872 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3873 if (len & ~(ICE_TXD_L4LEN_MAX))
3874 goto out_rm_features;
3875
3876 len = skb_inner_transport_header(skb) -
3877 skb_inner_network_header(skb);
3878 if (len & ~(ICE_TXD_IPLEN_MAX))
3879 goto out_rm_features;
3880 }
3881
3882 return features;
3883out_rm_features:
3884 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3885}
3886
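The header-length tests above reject any length with bits set outside the corresponding *_MAX constant, i.e. anything the Tx descriptor length field cannot encode. A small stand-alone illustration of the same "len & ~MAX" idiom; the constant here is a hypothetical 7-bit mask for the example, not the driver's real ICE_TXD_MACLEN_MAX value:

/* Illustration only: range check in the style of ice_features_check() */
#define EXAMPLE_MACLEN_MAX 0x7f	/* hypothetical field mask */

static bool hdr_len_fits(unsigned int len)
{
	/* Any bit above the field mask means the length can't be encoded */
	return !(len & ~EXAMPLE_MACLEN_MAX);
}
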
cdedef59
AV
3887static const struct net_device_ops ice_netdev_ops = {
3888 .ndo_open = ice_open,
3889 .ndo_stop = ice_stop,
2b245cb2 3890 .ndo_start_xmit = ice_start_xmit,
e94d4478
AV
3891 .ndo_features_check = ice_features_check,
3892 .ndo_set_rx_mode = ice_set_rx_mode,
3893 .ndo_set_mac_address = ice_set_mac_address,
3894 .ndo_validate_addr = eth_validate_addr,
3895 .ndo_change_mtu = ice_change_mtu,
fcea6f3d 3896 .ndo_get_stats64 = ice_get_stats64,
7c710869
AV
3897 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
3898 .ndo_set_vf_mac = ice_set_vf_mac,
3899 .ndo_get_vf_config = ice_get_vf_cfg,
3900 .ndo_set_vf_trust = ice_set_vf_trust,
3901 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
3902 .ndo_set_vf_link_state = ice_set_vf_link_state,
d76a60ba
AV
3903 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
3904 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
3905 .ndo_set_features = ice_set_features,
b1edc14a
MFIP
3906 .ndo_bridge_getlink = ice_bridge_getlink,
3907 .ndo_bridge_setlink = ice_bridge_setlink,
e94d4478
AV
3908 .ndo_fdb_add = ice_fdb_add,
3909 .ndo_fdb_del = ice_fdb_del,
b3969fd7 3910 .ndo_tx_timeout = ice_tx_timeout,
cdedef59 3911};
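
The ops table above only takes effect once it is attached to the netdev, which happens during netdev configuration elsewhere in the driver (not shown in this excerpt). A one-line sketch of the conventional hookup, with the surrounding setup function omitted:

	/* Conventional attachment at netdev configuration time (sketch) */
	netdev->netdev_ops = &ice_netdev_ops;

After this assignment the core drives the callbacks directly: ice_open()/ice_stop() on administrative up/down, ice_start_xmit() per transmitted frame, and ice_tx_timeout() when a stopped queue exceeds the watchdog timeout.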