// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"

struct ice_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
};

#define ICE_STAT(_type, _name, _stat) { \
        .stat_string = _name, \
        .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
        .stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
        ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
        ICE_STAT(struct ice_pf, _name, _stat)

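/**
 * ice_q_stats_len - count the u64 counters exported for the queues
 * @netdev: network interface device structure
 *
 * Each Tx and Rx queue on the VSI contributes one value per member of
 * struct ice_q_stats (packets and bytes).
 */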
static int ice_q_stats_len(struct net_device *netdev)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);

        return ((np->vsi->num_txq + np->vsi->num_rxq) *
                (sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
                              ice_q_stats_len(n))

static const struct ice_stats ice_gstrings_vsi_stats[] = {
        ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
        ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
        ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
        ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
        ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
        ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
        ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
        ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
        ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
        ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
        ICE_VSI_STAT("tx_linearize", tx_linearize),
        ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
        ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
        ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static struct ice_stats ice_gstrings_pf_stats[] = {
        ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
        ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
        ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
        ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
        ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
        ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
        ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
        ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
        ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
        ICE_PF_STAT("tx_size_64", stats.tx_size_64),
        ICE_PF_STAT("rx_size_64", stats.rx_size_64),
        ICE_PF_STAT("tx_size_127", stats.tx_size_127),
        ICE_PF_STAT("rx_size_127", stats.rx_size_127),
        ICE_PF_STAT("tx_size_255", stats.tx_size_255),
        ICE_PF_STAT("rx_size_255", stats.rx_size_255),
        ICE_PF_STAT("tx_size_511", stats.tx_size_511),
        ICE_PF_STAT("rx_size_511", stats.rx_size_511),
        ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
        ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
        ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
        ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
        ICE_PF_STAT("tx_size_big", stats.tx_size_big),
        ICE_PF_STAT("rx_size_big", stats.rx_size_big),
        ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
        ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
        ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
        ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
        ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
        ICE_PF_STAT("rx_undersize", stats.rx_undersize),
        ICE_PF_STAT("rx_fragments", stats.rx_fragments),
        ICE_PF_STAT("rx_oversize", stats.rx_oversize),
        ICE_PF_STAT("rx_jabber", stats.rx_jabber),
        ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
        ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
        ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
        ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
        ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
        ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
        ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};

static u32 ice_regs_dump_list[] = {
        PFGEN_STATE,
        PRTGEN_STATUS,
        QRX_CTRL(0),
        QINT_TQCTL(0),
        QINT_RQCTL(0),
        PFINT_OICR_ENA,
        QRX_ITR(0),
};

/**
 * ice_nvm_version_str - format the NVM version strings
 * @hw: ptr to the hardware info
 */
static char *ice_nvm_version_str(struct ice_hw *hw)
{
        static char buf[ICE_ETHTOOL_FWVER_LEN];
        u8 ver, patch;
        u32 full_ver;
        u16 build;

        full_ver = hw->nvm.oem_ver;
        ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
        build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
                      ICE_OEM_VER_BUILD_SHIFT);
        patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);

        snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
                 (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
                 (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
                 hw->nvm.eetrack, ver, build, patch);

        return buf;
}

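/**
 * ice_get_drvinfo - report driver, firmware and bus information
 * @netdev: network interface device structure
 * @drvinfo: ethtool drvinfo structure to fill out
 */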
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;

        strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
                sizeof(drvinfo->bus_info));
}

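/**
 * ice_get_regs_len - report the size of the register dump
 * @netdev: network interface device structure
 */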
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
        return sizeof(ice_regs_dump_list);
}

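/**
 * ice_get_regs - read the registers in ice_regs_dump_list
 * @netdev: network interface device structure
 * @regs: ethtool regs structure; version is set for the caller
 * @p: caller-provided buffer that receives the register values
 */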
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_pf *pf = np->vsi->back;
        struct ice_hw *hw = &pf->hw;
        u32 *regs_buf = (u32 *)p;
        int i;

        regs->version = 1;

        for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
                regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}

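/**
 * ice_get_msglevel - report the current driver message level
 * @netdev: network interface device structure
 */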
static u32 ice_get_msglevel(struct net_device *netdev)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
        if (pf->hw.debug_mask)
                netdev_info(netdev, "hw debug_mask: 0x%llX\n",
                            pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

        return pf->msg_enable;
}

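/**
 * ice_set_msglevel - set the driver message level
 * @netdev: network interface device structure
 * @data: requested message level; without CONFIG_DYNAMIC_DEBUG, a value
 *        with ICE_DBG_USER set is stored in the hardware debug_mask instead
 */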
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
        if (ICE_DBG_USER & data)
                pf->hw.debug_mask = data;
        else
                pf->msg_enable = data;
#else
        pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

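/**
 * ice_get_strings - fill the names for a string set
 * @netdev: network interface device structure
 * @stringset: string set ID, only ETH_SS_STATS is supported
 * @data: buffer that receives ETH_GSTRING_LEN sized name entries
 */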
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        char *p = (char *)data;
        unsigned int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "%s",
                                 ice_gstrings_vsi_stats[i].stat_string);
                        p += ETH_GSTRING_LEN;
                }

                ice_for_each_txq(vsi, i) {
                        snprintf(p, ETH_GSTRING_LEN,
                                 "tx-queue-%u.tx_packets", i);
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
                        p += ETH_GSTRING_LEN;
                }

                ice_for_each_rxq(vsi, i) {
                        snprintf(p, ETH_GSTRING_LEN,
                                 "rx-queue-%u.rx_packets", i);
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
                        p += ETH_GSTRING_LEN;
                }

                if (vsi->type != ICE_VSI_PF)
                        return;

                for (i = 0; i < ICE_PF_STATS_LEN; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "port.%s",
                                 ice_gstrings_pf_stats[i].stat_string);
                        p += ETH_GSTRING_LEN;
                }

                break;
        default:
                break;
        }
}

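/**
 * ice_get_sset_count - report the number of strings in a string set
 * @netdev: network interface device structure
 * @sset: string set ID
 */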
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ICE_ALL_STATS_LEN(netdev);
        default:
                return -EOPNOTSUPP;
        }
}

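/**
 * ice_get_ethtool_stats - fill the buffer reported by ethtool -S
 * @netdev: network interface device structure
 * @stats: ethtool stats structure (unused)
 * @data: buffer that receives the counters, in the same order as the
 *        names written by ice_get_strings()
 */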
static void
ice_get_ethtool_stats(struct net_device *netdev,
                      struct ethtool_stats __always_unused *stats, u64 *data)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
        struct ice_ring *ring;
        unsigned int j = 0;
        int i = 0;
        char *p;

        for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
                p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
                data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
                             sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }

        /* populate per queue stats */
        rcu_read_lock();

        ice_for_each_txq(vsi, j) {
                ring = READ_ONCE(vsi->tx_rings[j]);
                if (!ring)
                        continue;
                data[i++] = ring->stats.pkts;
                data[i++] = ring->stats.bytes;
        }

        ice_for_each_rxq(vsi, j) {
                ring = READ_ONCE(vsi->rx_rings[j]);
                /* guard against a ring that hasn't been allocated yet,
                 * mirroring the Tx loop above
                 */
                if (!ring)
                        continue;
                data[i++] = ring->stats.pkts;
                data[i++] = ring->stats.bytes;
        }

        rcu_read_unlock();

        if (vsi->type != ICE_VSI_PF)
                return;

        for (j = 0; j < ICE_PF_STATS_LEN; j++) {
                p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
                data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
                             sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
}

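/**
 * ice_get_link_ksettings - report the device's link settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings structure to fill out
 *
 * Derives speed and duplex from the current link status, and the port,
 * media type and pause advertisement from the port info.
 */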
static int
ice_get_link_ksettings(struct net_device *netdev,
                       struct ethtool_link_ksettings *ks)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_link_status *hw_link_info;
        struct ice_vsi *vsi = np->vsi;
        bool link_up;

        hw_link_info = &vsi->port_info->phy.link_info;
        link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

        ethtool_link_ksettings_add_link_mode(ks, supported,
                                             10000baseT_Full);
        ethtool_link_ksettings_add_link_mode(ks, advertising,
                                             10000baseT_Full);

        /* set speed and duplex */
        if (link_up) {
                switch (hw_link_info->link_speed) {
                case ICE_AQ_LINK_SPEED_100MB:
                        ks->base.speed = SPEED_100;
                        break;
                case ICE_AQ_LINK_SPEED_2500MB:
                        ks->base.speed = SPEED_2500;
                        break;
                case ICE_AQ_LINK_SPEED_5GB:
                        ks->base.speed = SPEED_5000;
                        break;
                case ICE_AQ_LINK_SPEED_10GB:
                        ks->base.speed = SPEED_10000;
                        break;
                case ICE_AQ_LINK_SPEED_25GB:
                        ks->base.speed = SPEED_25000;
                        break;
                case ICE_AQ_LINK_SPEED_40GB:
                        ks->base.speed = SPEED_40000;
                        break;
                default:
                        ks->base.speed = SPEED_UNKNOWN;
                        break;
                }

                ks->base.duplex = DUPLEX_FULL;
        } else {
                ks->base.speed = SPEED_UNKNOWN;
                ks->base.duplex = DUPLEX_UNKNOWN;
        }

        /* set autoneg settings */
        ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
                            AUTONEG_ENABLE : AUTONEG_DISABLE);

        /* set media type settings */
        switch (vsi->port_info->phy.media_type) {
        case ICE_MEDIA_FIBER:
                ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
                ks->base.port = PORT_FIBRE;
                break;
        case ICE_MEDIA_BASET:
                ethtool_link_ksettings_add_link_mode(ks, supported, TP);
                ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
                ks->base.port = PORT_TP;
                break;
        case ICE_MEDIA_BACKPLANE:
                ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
                ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     Backplane);
                ks->base.port = PORT_NONE;
                break;
        case ICE_MEDIA_DA:
                ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
                ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
                ks->base.port = PORT_DA;
                break;
        default:
                ks->base.port = PORT_OTHER;
                break;
        }

        /* flow control is symmetric and always supported */
        ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

        switch (vsi->port_info->fc.req_mode) {
        case ICE_FC_FULL:
                ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
                break;
        case ICE_FC_TX_PAUSE:
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     Asym_Pause);
                break;
        case ICE_FC_RX_PAUSE:
                ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     Asym_Pause);
                break;
        case ICE_FC_PFC:
        default:
                ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
                ethtool_link_ksettings_del_link_mode(ks, advertising,
                                                     Asym_Pause);
                break;
        }

        return 0;
}

/**
 * ice_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                         u32 __always_unused *rule_locs)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = vsi->rss_size;
                ret = 0;
                break;
        default:
                break;
        }

        return ret;
}

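/**
 * ice_get_ringparam - report current and maximum descriptor counts
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill out
 */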
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;

        ring->rx_max_pending = ICE_MAX_NUM_DESC;
        ring->tx_max_pending = ICE_MAX_NUM_DESC;
        ring->rx_pending = vsi->rx_rings[0]->count;
        ring->tx_pending = vsi->tx_rings[0]->count;
        ring->rx_mini_pending = ICE_MIN_NUM_DESC;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_jumbo_pending = 0;
}

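/**
 * ice_set_ringparam - change the Tx/Rx descriptor counts
 * @netdev: network interface device structure
 * @ring: requested descriptor counts
 *
 * New rings are allocated before the interface is taken down, so a
 * failed allocation leaves the currently configured rings in place.
 */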
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
        struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
        int i, timeout = 50, err = 0;
        u32 new_rx_cnt, new_tx_cnt;

        if (ring->tx_pending > ICE_MAX_NUM_DESC ||
            ring->tx_pending < ICE_MIN_NUM_DESC ||
            ring->rx_pending > ICE_MAX_NUM_DESC ||
            ring->rx_pending < ICE_MIN_NUM_DESC) {
                netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
                           ring->tx_pending, ring->rx_pending,
                           ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
                return -EINVAL;
        }

        new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
        new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);

        /* if nothing to do return success */
        if (new_tx_cnt == vsi->tx_rings[0]->count &&
            new_rx_cnt == vsi->rx_rings[0]->count) {
                netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
                return 0;
        }

        while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
                timeout--;
                if (!timeout)
                        return -EBUSY;
                usleep_range(1000, 2000);
        }

        /* set for the next time the netdev is started */
        if (!netif_running(vsi->netdev)) {
                for (i = 0; i < vsi->alloc_txq; i++)
                        vsi->tx_rings[i]->count = new_tx_cnt;
                for (i = 0; i < vsi->alloc_rxq; i++)
                        vsi->rx_rings[i]->count = new_rx_cnt;
                netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
                goto done;
        }

        if (new_tx_cnt == vsi->tx_rings[0]->count)
                goto process_rx;

        /* alloc updated Tx resources */
        netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
                    vsi->tx_rings[0]->count, new_tx_cnt);

        tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
                                sizeof(struct ice_ring), GFP_KERNEL);
        if (!tx_rings) {
                err = -ENOMEM;
                goto done;
        }

        for (i = 0; i < vsi->num_txq; i++) {
                /* clone ring and setup updated count */
                tx_rings[i] = *vsi->tx_rings[i];
                tx_rings[i].count = new_tx_cnt;
                tx_rings[i].desc = NULL;
                tx_rings[i].tx_buf = NULL;
                err = ice_setup_tx_ring(&tx_rings[i]);
                if (err) {
                        while (i) {
                                i--;
                                ice_clean_tx_ring(&tx_rings[i]);
                        }
                        devm_kfree(&pf->pdev->dev, tx_rings);
                        goto done;
                }
        }

process_rx:
        if (new_rx_cnt == vsi->rx_rings[0]->count)
                goto process_link;

        /* alloc updated Rx resources */
        netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
                    vsi->rx_rings[0]->count, new_rx_cnt);

        rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
                                sizeof(struct ice_ring), GFP_KERNEL);
        if (!rx_rings) {
                err = -ENOMEM;
                goto done;
        }

        for (i = 0; i < vsi->num_rxq; i++) {
                /* clone ring and setup updated count */
                rx_rings[i] = *vsi->rx_rings[i];
                rx_rings[i].count = new_rx_cnt;
                rx_rings[i].desc = NULL;
                rx_rings[i].rx_buf = NULL;
                /* this is to allow wr32 to have something to write to
                 * during early allocation of Rx buffers
                 */
                rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

                err = ice_setup_rx_ring(&rx_rings[i]);
                if (err)
                        goto rx_unwind;

                /* allocate Rx buffers */
                err = ice_alloc_rx_bufs(&rx_rings[i],
                                        ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
                if (err) {
                        while (i) {
                                i--;
                                ice_free_rx_ring(&rx_rings[i]);
                        }
                        devm_kfree(&pf->pdev->dev, rx_rings);
                        err = -ENOMEM;
                        goto free_tx;
                }
        }

process_link:
        /* Bring interface down, copy in the new ring info, then restore the
         * interface. If VSI is up, bring it down and then back up
         */
        if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
                ice_down(vsi);

                if (tx_rings) {
                        for (i = 0; i < vsi->alloc_txq; i++) {
                                ice_free_tx_ring(vsi->tx_rings[i]);
                                *vsi->tx_rings[i] = tx_rings[i];
                        }
                        devm_kfree(&pf->pdev->dev, tx_rings);
                }

                if (rx_rings) {
                        for (i = 0; i < vsi->alloc_rxq; i++) {
                                ice_free_rx_ring(vsi->rx_rings[i]);
                                /* copy the real tail offset */
                                rx_rings[i].tail = vsi->rx_rings[i]->tail;
                                /* this is to fake out the allocation routine
                                 * into thinking it has to realloc everything
                                 * but the recycling logic will let us re-use
                                 * the buffers allocated above
                                 */
                                rx_rings[i].next_to_use = 0;
                                rx_rings[i].next_to_clean = 0;
                                rx_rings[i].next_to_alloc = 0;
                                *vsi->rx_rings[i] = rx_rings[i];
                        }
                        devm_kfree(&pf->pdev->dev, rx_rings);
                }

                ice_up(vsi);
        }
        goto done;

free_tx:
        /* error cleanup if the Rx allocations failed after getting Tx */
        if (tx_rings) {
                for (i = 0; i < vsi->alloc_txq; i++)
                        ice_free_tx_ring(&tx_rings[i]);
                devm_kfree(&pf->pdev->dev, tx_rings);
        }

done:
        clear_bit(__ICE_CFG_BUSY, pf->state);
        return err;
}

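/**
 * ice_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 */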
static int ice_nway_reset(struct net_device *netdev)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_link_status *hw_link_info;
        struct ice_vsi *vsi = np->vsi;
        struct ice_port_info *pi;
        enum ice_status status;
        bool link_up;

        pi = vsi->port_info;
        hw_link_info = &pi->phy.link_info;
        link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

        status = ice_aq_set_link_restart_an(pi, link_up, NULL);
        if (status) {
                netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
                            status, pi->hw->adminq.sq_last_status);
                return -EIO;
        }

        return 0;
}

/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_port_info *pi;

        pi = np->vsi->port_info;
        pause->autoneg =
                ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
                 AUTONEG_ENABLE : AUTONEG_DISABLE);

        if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
                pause->rx_pause = 1;
        } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
                pause->tx_pause = 1;
        } else if (pi->fc.current_mode == ICE_FC_FULL) {
                pause->rx_pause = 1;
                pause->tx_pause = 1;
        }
}

/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control parameters
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_link_status *hw_link_info;
        struct ice_pf *pf = np->vsi->back;
        struct ice_vsi *vsi = np->vsi;
        struct ice_hw *hw = &pf->hw;
        struct ice_port_info *pi;
        enum ice_status status;
        u8 aq_failures;
        bool link_up;
        int err = 0;

        pi = vsi->port_info;
        hw_link_info = &pi->phy.link_info;
        link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

        /* Changing the port's flow control is not supported if this isn't the
         * PF VSI
         */
        if (vsi->type != ICE_VSI_PF) {
                netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
                return -EOPNOTSUPP;
        }

        if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
                netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
                return -EOPNOTSUPP;
        }

        /* If we have link and don't have autoneg */
        if (!test_bit(__ICE_DOWN, pf->state) &&
            !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
                /* Send message that it might not necessarily work */
                netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
        }

        if (pause->rx_pause && pause->tx_pause)
                pi->fc.req_mode = ICE_FC_FULL;
        else if (pause->rx_pause && !pause->tx_pause)
                pi->fc.req_mode = ICE_FC_RX_PAUSE;
        else if (!pause->rx_pause && pause->tx_pause)
                pi->fc.req_mode = ICE_FC_TX_PAUSE;
        else if (!pause->rx_pause && !pause->tx_pause)
                pi->fc.req_mode = ICE_FC_NONE;
        else
                return -EINVAL;

        /* Tell the OS link is going down; the link will go back up when fw
         * says it is ready asynchronously
         */
        ice_print_link_msg(vsi, false);
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);

        /* Set the FC mode and only restart AN if link is up */
        status = ice_set_fc(pi, &aq_failures, link_up);

        if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
                netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
                            status, hw->adminq.sq_last_status);
                err = -EAGAIN;
        } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
                netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
                            status, hw->adminq.sq_last_status);
                err = -EAGAIN;
        } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
                netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
                            status, hw->adminq.sq_last_status);
                err = -EAGAIN;
        }

        if (!test_bit(__ICE_DOWN, pf->state)) {
                /* Give it a little more time to try to come back */
                msleep(75);
                if (!test_bit(__ICE_DOWN, pf->state))
                        return ice_nway_reset(netdev);
        }

        return err;
}

/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the key size.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
        return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);

        return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
 */
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
        int ret = 0, i;
        u8 *lut;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;

        if (!indir)
                return 0;

        if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
                /* RSS is not supported; return an error */
                netdev_warn(netdev, "RSS is not configured on this VSI!\n");
                return -EIO;
        }

        lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
        if (!lut)
                return -ENOMEM;

        if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
                ret = -EIO;
                goto out;
        }

        for (i = 0; i < vsi->rss_table_size; i++)
                indir[i] = (u32)(lut[i]);

out:
        devm_kfree(&pf->pdev->dev, lut);
        return ret;
}

/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
                        const u8 *key, const u8 hfunc)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
        u8 *seed = NULL;

        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;

        if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
                /* RSS is not supported; return an error */
                netdev_warn(netdev, "RSS is not configured on this VSI!\n");
                return -EIO;
        }

        if (key) {
                if (!vsi->rss_hkey_user) {
                        vsi->rss_hkey_user =
                                devm_kzalloc(&pf->pdev->dev,
                                             ICE_VSIQF_HKEY_ARRAY_SIZE,
                                             GFP_KERNEL);
                        if (!vsi->rss_hkey_user)
                                return -ENOMEM;
                }
                memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
                seed = vsi->rss_hkey_user;
        }

        if (!vsi->rss_lut_user) {
                vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
                                                 vsi->rss_table_size,
                                                 GFP_KERNEL);
                if (!vsi->rss_lut_user)
                        return -ENOMEM;
        }

        /* Each 32 bits pointed by 'indir' is stored with a lut entry */
        if (indir) {
                int i;

                for (i = 0; i < vsi->rss_table_size; i++)
                        vsi->rss_lut_user[i] = (u8)(indir[i]);
        } else {
                ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
                                 vsi->rss_size);
        }

        if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
                return -EIO;

        return 0;
}

static const struct ethtool_ops ice_ethtool_ops = {
        .get_link_ksettings = ice_get_link_ksettings,
        .get_drvinfo = ice_get_drvinfo,
        .get_regs_len = ice_get_regs_len,
        .get_regs = ice_get_regs,
        .get_msglevel = ice_get_msglevel,
        .set_msglevel = ice_set_msglevel,
        .get_link = ethtool_op_get_link,
        .get_strings = ice_get_strings,
        .get_ethtool_stats = ice_get_ethtool_stats,
        .get_sset_count = ice_get_sset_count,
        .get_rxnfc = ice_get_rxnfc,
        .get_ringparam = ice_get_ringparam,
        .set_ringparam = ice_set_ringparam,
        .nway_reset = ice_nway_reset,
        .get_pauseparam = ice_get_pauseparam,
        .set_pauseparam = ice_set_pauseparam,
        .get_rxfh_key_size = ice_get_rxfh_key_size,
        .get_rxfh_indir_size = ice_get_rxfh_indir_size,
        .get_rxfh = ice_get_rxfh,
        .set_rxfh = ice_set_rxfh,
};

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * setup netdev ethtool ops with ice specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &ice_ethtool_ops;
}