1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2017 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/bitops.h>
12 #include <linux/ctype.h>
13 #include <linux/stringify.h>
14 #include <linux/ethtool.h>
15 #include <linux/ethtool_netlink.h>
16 #include <linux/linkmode.h>
17 #include <linux/interrupt.h>
18 #include <linux/pci.h>
19 #include <linux/etherdevice.h>
20 #include <linux/crc32.h>
21 #include <linux/firmware.h>
22 #include <linux/utsname.h>
23 #include <linux/time.h>
24 #include <linux/ptp_clock_kernel.h>
25 #include <linux/net_tstamp.h>
26 #include <linux/timecounter.h>
27 #include <net/netlink.h>
30 #include "bnxt_hwrm.h"
34 #include "bnxt_ethtool.h"
35 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
36 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
37 #include "bnxt_coredump.h"
/*
 * BNXT_NVM_ERR_MSG - report an NVM operation error both through the
 * netlink extack (visible to the ethtool user) and the kernel log.
 * NOTE(review): the do/while wrapper lines are elided in this listing.
 */
39 #define BNXT_NVM_ERR_MSG(dev, extack, msg) \
42 NL_SET_ERR_MSG_MOD(extack, msg); \
43 netdev_err(dev, "%s\n", msg); \
46 static u32 bnxt_get_msglevel(struct net_device *dev)
48 struct bnxt *bp = netdev_priv(dev);
50 return bp->msg_enable;
53 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
55 struct bnxt *bp = netdev_priv(dev);
57 bp->msg_enable = value;
/*
 * bnxt_get_coalesce() - ethtool .get_coalesce handler.
 * Reports the cached RX/TX interrupt coalescing parameters and the stats
 * block update interval.  Hardware buffer counts are stored scaled by
 * bufs_per_record, so they are divided by "mult" to report frame counts.
 */
60 static int bnxt_get_coalesce(struct net_device *dev,
61 struct ethtool_coalesce *coal,
62 struct kernel_ethtool_coalesce *kernel_coal,
63 struct netlink_ext_ack *extack)
65 struct bnxt *bp = netdev_priv(dev);
66 struct bnxt_coal *hw_coal;
69 memset(coal, 0, sizeof(*coal));
/* Adaptive RX coalescing (DIM) is tracked as a device-wide flag. */
71 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
/* RX side: translate internal ticks/buffers into usecs/frames. */
73 hw_coal = &bp->rx_coal;
74 mult = hw_coal->bufs_per_record;
75 coal->rx_coalesce_usecs = hw_coal->coal_ticks;
76 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
77 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
78 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
/* NOTE(review): the guard testing the TIMER_RESET flag is elided here. */
80 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
81 kernel_coal->use_cqe_mode_rx = true;
/* TX side mirrors the RX conversion above. */
83 hw_coal = &bp->tx_coal;
84 mult = hw_coal->bufs_per_record;
85 coal->tx_coalesce_usecs = hw_coal->coal_ticks;
86 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
87 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
88 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
90 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
91 kernel_coal->use_cqe_mode_tx = true;
/* Stats block refresh interval, in microseconds. */
93 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
/*
 * bnxt_set_coalesce() - ethtool .set_coalesce handler.
 * Converts the requested usecs/frames values back into internal
 * ticks/buffer units (scaled by bufs_per_record), toggles DIM and
 * CQE coalescing mode, and re-applies the configuration to firmware.
 */
98 static int bnxt_set_coalesce(struct net_device *dev,
99 struct ethtool_coalesce *coal,
100 struct kernel_ethtool_coalesce *kernel_coal,
101 struct netlink_ext_ack *extack)
103 struct bnxt *bp = netdev_priv(dev);
104 bool update_stats = false;
105 struct bnxt_coal *hw_coal;
/* Enable/disable adaptive RX coalescing (DIM) per the request. */
109 if (coal->use_adaptive_rx_coalesce) {
110 bp->flags |= BNXT_FLAG_DIM;
112 if (bp->flags & BNXT_FLAG_DIM) {
113 bp->flags &= ~(BNXT_FLAG_DIM);
/* CQE mode requires the TIMER_RESET completion capability. */
118 if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
119 !(bp->coal_cap.cmpl_params &
120 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
/* RX side: scale frame counts by bufs_per_record before caching. */
123 hw_coal = &bp->rx_coal;
124 mult = hw_coal->bufs_per_record;
125 hw_coal->coal_ticks = coal->rx_coalesce_usecs;
126 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
127 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
128 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
130 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
131 if (kernel_coal->use_cqe_mode_rx)
133 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* TX side mirrors the RX conversion above. */
135 hw_coal = &bp->tx_coal;
136 mult = hw_coal->bufs_per_record;
137 hw_coal->coal_ticks = coal->tx_coalesce_usecs;
138 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
139 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
140 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
142 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
143 if (kernel_coal->use_cqe_mode_tx)
145 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
147 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
148 u32 stats_ticks = coal->stats_block_coalesce_usecs;
150 /* Allow 0, which means disable. */
152 stats_ticks = clamp_t(u32, stats_ticks,
153 BNXT_MIN_STATS_COAL_TICKS,
154 BNXT_MAX_STATS_COAL_TICKS);
155 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
156 bp->stats_coal_ticks = stats_ticks;
/* Convert usecs to jiffies for the driver's periodic timer. */
157 if (bp->stats_coal_ticks)
158 bp->current_interval =
159 bp->stats_coal_ticks * HZ / 1000000;
161 bp->current_interval = BNXT_TIMER_INTERVAL;
/* Changing the stats interval needs a full close/open cycle;
 * otherwise a lighter-weight HWRM coalescing update suffices.
 */
166 if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
168 bnxt_close_nic(bp, true, false);
169 rc = bnxt_open_nic(bp, true, false);
171 rc = bnxt_hwrm_set_coal(bp);
/* Per-ring hardware RX counter names (entries elided in this listing). */
178 static const char * const bnxt_ring_rx_stats_str[] = {
/* Per-ring hardware TX counter names. */
189 static const char * const bnxt_ring_tx_stats_str[] = {
/* Legacy TPA (hardware GRO/LRO) per-ring counter names. */
200 static const char * const bnxt_ring_tpa_stats_str[] = {
/* TPA v2 per-ring counter names used by newer chips. */
207 static const char * const bnxt_ring_tpa2_stats_str[] = {
208 "rx_tpa_eligible_pkt",
209 "rx_tpa_eligible_bytes",
/* Software-maintained per-ring RX counter names. */
216 static const char * const bnxt_rx_sw_stats_str[] = {
/* Software-maintained counters common to every completion ring. */
222 static const char * const bnxt_cmn_sw_stats_str[] = {
/*
 * Helper macros building { offset, "name" } table entries for the port
 * statistics arrays below.  The *_PFC_* and *_COS_* variants expand to
 * eight entries, one per priority / class-of-service (0-7).
 */
226 #define BNXT_RX_STATS_ENTRY(counter) \
227 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
229 #define BNXT_TX_STATS_ENTRY(counter) \
230 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
232 #define BNXT_RX_STATS_EXT_ENTRY(counter) \
233 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
235 #define BNXT_TX_STATS_EXT_ENTRY(counter) \
236 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
/* Per-priority PFC pause duration/transition counter pairs. */
238 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
239 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
240 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
242 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
243 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
244 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
246 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \
247 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
248 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
249 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
250 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
251 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
252 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
253 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
254 BNXT_RX_STATS_EXT_PFC_ENTRY(7)
256 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \
257 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
258 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
259 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
260 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
261 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
262 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
263 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
264 BNXT_TX_STATS_EXT_PFC_ENTRY(7)
/* Per-CoS byte/packet counter pairs. */
266 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
267 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
268 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
270 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
271 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
272 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
274 #define BNXT_RX_STATS_EXT_COS_ENTRIES \
275 BNXT_RX_STATS_EXT_COS_ENTRY(0), \
276 BNXT_RX_STATS_EXT_COS_ENTRY(1), \
277 BNXT_RX_STATS_EXT_COS_ENTRY(2), \
278 BNXT_RX_STATS_EXT_COS_ENTRY(3), \
279 BNXT_RX_STATS_EXT_COS_ENTRY(4), \
280 BNXT_RX_STATS_EXT_COS_ENTRY(5), \
281 BNXT_RX_STATS_EXT_COS_ENTRY(6), \
282 BNXT_RX_STATS_EXT_COS_ENTRY(7) \
284 #define BNXT_TX_STATS_EXT_COS_ENTRIES \
285 BNXT_TX_STATS_EXT_COS_ENTRY(0), \
286 BNXT_TX_STATS_EXT_COS_ENTRY(1), \
287 BNXT_TX_STATS_EXT_COS_ENTRY(2), \
288 BNXT_TX_STATS_EXT_COS_ENTRY(3), \
289 BNXT_TX_STATS_EXT_COS_ENTRY(4), \
290 BNXT_TX_STATS_EXT_COS_ENTRY(5), \
291 BNXT_TX_STATS_EXT_COS_ENTRY(6), \
292 BNXT_TX_STATS_EXT_COS_ENTRY(7) \
294 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
295 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
296 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
298 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
299 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
300 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
301 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
302 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
303 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
304 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
305 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
306 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
/* Per-priority entries reuse the cos0 offset as a base; the real offset
 * is computed at runtime from the pri2cos mapping (see base_off use in
 * bnxt_get_ethtool_stats()).
 */
308 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
309 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
310 __stringify(counter##_pri##n) }
312 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
313 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
314 __stringify(counter##_pri##n) }
316 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \
317 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
318 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
319 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
320 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
321 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
322 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
323 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
324 BNXT_RX_STATS_PRI_ENTRY(counter, 7)
326 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \
327 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
328 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
329 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
330 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
331 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
332 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
333 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
334 BNXT_TX_STATS_PRI_ENTRY(counter, 7)
/* Names for the driver-wide accumulated ring error counters; the order
 * must match struct bnxt_total_ring_err_stats (walked field-by-field in
 * bnxt_get_ethtool_stats()).
 */
342 static const char *const bnxt_ring_err_stats_arr[] = {
343 "rx_total_l4_csum_errors",
345 "rx_total_buf_errors",
346 "rx_total_oom_discards",
347 "rx_total_netpoll_discards",
348 "rx_total_ring_discards",
350 "tx_total_ring_discards",
/* Counts per category of per-ring statistics, derived from the string
 * tables above so names and values can never drift apart.
 */
354 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
355 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
356 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
357 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
/* Port-level MAC statistics: { offset into the firmware stats block,
 * ethtool string }.  Offsets index the u64 array returned by firmware.
 */
359 static const struct {
361 char string[ETH_GSTRING_LEN];
362 } bnxt_port_stats_arr[] = {
363 BNXT_RX_STATS_ENTRY(rx_64b_frames),
364 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
365 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
366 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
367 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
368 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
369 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
370 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
371 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
372 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
373 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
374 BNXT_RX_STATS_ENTRY(rx_total_frames),
375 BNXT_RX_STATS_ENTRY(rx_ucast_frames),
376 BNXT_RX_STATS_ENTRY(rx_mcast_frames),
377 BNXT_RX_STATS_ENTRY(rx_bcast_frames),
378 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
379 BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
380 BNXT_RX_STATS_ENTRY(rx_pause_frames),
381 BNXT_RX_STATS_ENTRY(rx_pfc_frames),
382 BNXT_RX_STATS_ENTRY(rx_align_err_frames),
383 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
384 BNXT_RX_STATS_ENTRY(rx_jbr_frames),
385 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
386 BNXT_RX_STATS_ENTRY(rx_tagged_frames),
387 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
388 BNXT_RX_STATS_ENTRY(rx_good_frames),
389 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
390 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
391 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
392 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
393 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
394 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
395 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
396 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
397 BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
398 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
399 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
400 BNXT_RX_STATS_ENTRY(rx_bytes),
401 BNXT_RX_STATS_ENTRY(rx_runt_bytes),
402 BNXT_RX_STATS_ENTRY(rx_runt_frames),
403 BNXT_RX_STATS_ENTRY(rx_stat_discard),
404 BNXT_RX_STATS_ENTRY(rx_stat_err),
/* TX counters follow the RX block. */
406 BNXT_TX_STATS_ENTRY(tx_64b_frames),
407 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
408 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
409 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
410 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
411 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
412 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
413 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
414 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
415 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
416 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
417 BNXT_TX_STATS_ENTRY(tx_good_frames),
418 BNXT_TX_STATS_ENTRY(tx_total_frames),
419 BNXT_TX_STATS_ENTRY(tx_ucast_frames),
420 BNXT_TX_STATS_ENTRY(tx_mcast_frames),
421 BNXT_TX_STATS_ENTRY(tx_bcast_frames),
422 BNXT_TX_STATS_ENTRY(tx_pause_frames),
423 BNXT_TX_STATS_ENTRY(tx_pfc_frames),
424 BNXT_TX_STATS_ENTRY(tx_jabber_frames),
425 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
426 BNXT_TX_STATS_ENTRY(tx_err),
427 BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
428 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
429 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
430 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
431 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
432 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
433 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
434 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
435 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
436 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
437 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
438 BNXT_TX_STATS_ENTRY(tx_total_collisions),
439 BNXT_TX_STATS_ENTRY(tx_bytes),
440 BNXT_TX_STATS_ENTRY(tx_xthol_frames),
441 BNXT_TX_STATS_ENTRY(tx_stat_discard),
442 BNXT_TX_STATS_ENTRY(tx_stat_error),
/* Extended RX port statistics (newer firmware); the number of valid
 * entries is bounded at runtime by bp->fw_rx_stats_ext_size.
 */
445 static const struct {
447 char string[ETH_GSTRING_LEN];
448 } bnxt_port_stats_ext_arr[] = {
449 BNXT_RX_STATS_EXT_ENTRY(link_down_events),
450 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
451 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
452 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
453 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
454 BNXT_RX_STATS_EXT_COS_ENTRIES,
455 BNXT_RX_STATS_EXT_PFC_ENTRIES,
456 BNXT_RX_STATS_EXT_ENTRY(rx_bits),
457 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
458 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
459 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
460 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
461 BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
462 BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
463 BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
/* Extended TX port statistics, bounded by bp->fw_tx_stats_ext_size. */
466 static const struct {
468 char string[ETH_GSTRING_LEN];
469 } bnxt_tx_port_stats_ext_arr[] = {
470 BNXT_TX_STATS_EXT_COS_ENTRIES,
471 BNXT_TX_STATS_EXT_PFC_ENTRIES,
/* Per-priority views of the CoS counters; base_off plus the runtime
 * pri2cos mapping selects the actual counter (see pri2cos handling in
 * bnxt_get_ethtool_stats()).
 */
474 static const struct {
476 char string[ETH_GSTRING_LEN];
477 } bnxt_rx_bytes_pri_arr[] = {
478 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
481 static const struct {
483 char string[ETH_GSTRING_LEN];
484 } bnxt_rx_pkts_pri_arr[] = {
485 BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
488 static const struct {
490 char string[ETH_GSTRING_LEN];
491 } bnxt_tx_bytes_pri_arr[] = {
492 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
495 static const struct {
497 char string[ETH_GSTRING_LEN];
498 } bnxt_tx_pkts_pri_arr[] = {
499 BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
/* Aggregate counts used when sizing the ethtool stats/strings buffers. */
502 #define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr)
503 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
504 #define BNXT_NUM_STATS_PRI \
505 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
506 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
507 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
508 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
510 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
512 if (BNXT_SUPPORTS_TPA(bp)) {
513 if (bp->max_tpa_v2) {
514 if (BNXT_CHIP_P5(bp))
515 return BNXT_NUM_TPA_RING_STATS_P5;
516 return BNXT_NUM_TPA_RING_STATS_P7;
518 return BNXT_NUM_TPA_RING_STATS;
523 static int bnxt_get_num_ring_stats(struct bnxt *bp)
527 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
528 bnxt_get_num_tpa_ring_stats(bp);
529 tx = NUM_RING_TX_HW_STATS;
530 cmn = NUM_RING_CMN_SW_STATS;
531 return rx * bp->rx_nr_rings +
532 tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
533 cmn * bp->cp_nr_rings;
/*
 * Total ethtool statistics count: per-ring stats, accumulated ring
 * errors, and (when the corresponding flags are set) port stats,
 * extended port stats and per-priority stats.  The extended counts are
 * clamped to what the running firmware actually provides.
 */
536 static int bnxt_get_num_stats(struct bnxt *bp)
538 int num_stats = bnxt_get_num_ring_stats(bp);
541 num_stats += BNXT_NUM_RING_ERR_STATS;
543 if (bp->flags & BNXT_FLAG_PORT_STATS)
544 num_stats += BNXT_NUM_PORT_STATS;
546 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
/* Firmware may report fewer ext counters than the table defines. */
547 len = min_t(int, bp->fw_rx_stats_ext_size,
548 ARRAY_SIZE(bnxt_port_stats_ext_arr));
550 len = min_t(int, bp->fw_tx_stats_ext_size,
551 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
553 if (bp->pri2cos_valid)
554 num_stats += BNXT_NUM_STATS_PRI;
/*
 * ethtool .get_sset_count handler: number of strings for the requested
 * string set (stats count or self-test count; switch cases elided in
 * this listing).
 */
560 static int bnxt_get_sset_count(struct net_device *dev, int sset)
562 struct bnxt *bp = netdev_priv(dev);
566 return bnxt_get_num_stats(bp);
570 return bp->num_tests;
576 static bool is_rx_ring(struct bnxt *bp, int ring_num)
578 return ring_num < bp->rx_nr_rings;
581 static bool is_tx_ring(struct bnxt *bp, int ring_num)
585 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
586 tx_base = bp->rx_nr_rings;
588 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
/*
 * bnxt_get_ethtool_stats() - ethtool .get_ethtool_stats handler.
 * Fills @buf in the exact order bnxt_get_strings() emits names:
 * per-ring HW/TPA/SW stats, accumulated ring error totals, then the
 * optional port, extended-port and per-priority counters.  "j" is the
 * running output index throughout.
 */
593 static void bnxt_get_ethtool_stats(struct net_device *dev,
594 struct ethtool_stats *stats, u64 *buf)
596 struct bnxt_total_ring_err_stats ring_err_stats = {0};
597 struct bnxt *bp = netdev_priv(dev);
/* Rings not allocated: skip over the per-ring region (left zeroed). */
603 j += bnxt_get_num_ring_stats(bp);
604 goto skip_ring_stats;
607 tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
608 for (i = 0; i < bp->cp_nr_rings; i++) {
609 struct bnxt_napi *bnapi = bp->bnapi[i];
610 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
611 u64 *sw_stats = cpr->stats.sw_stats;
/* HW RX counters occupy the first NUM_RING_RX_HW_STATS slots. */
615 if (is_rx_ring(bp, i)) {
616 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
617 buf[j] = sw_stats[k];
/* HW TX counters follow the RX block within sw_stats. */
619 if (is_tx_ring(bp, i)) {
620 k = NUM_RING_RX_HW_STATS;
621 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
623 buf[j] = sw_stats[k];
625 if (!tpa_stats || !is_rx_ring(bp, i))
626 goto skip_tpa_ring_stats;
/* TPA counters sit after the HW RX+TX blocks. */
628 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
629 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
631 buf[j] = sw_stats[k];
/* Software-maintained per-ring counters. */
634 sw = (u64 *)&cpr->sw_stats.rx;
635 if (is_rx_ring(bp, i)) {
636 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
640 sw = (u64 *)&cpr->sw_stats.cmn;
641 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
/* Ring error totals: live counters plus the pre-reset snapshot, walked
 * field-by-field in bnxt_ring_err_stats_arr order.
 */
645 bnxt_get_ring_err_stats(bp, &ring_err_stats);
648 curr = &ring_err_stats.rx_total_l4_csum_errors;
649 prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
650 for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
651 buf[j] = *curr + *prev;
653 if (bp->flags & BNXT_FLAG_PORT_STATS) {
654 u64 *port_stats = bp->port_stats.sw_stats;
656 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
657 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
659 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
660 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
661 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
/* Only as many ext counters as the firmware actually reports. */
664 len = min_t(u32, bp->fw_rx_stats_ext_size,
665 ARRAY_SIZE(bnxt_port_stats_ext_arr));
666 for (i = 0; i < len; i++, j++) {
667 buf[j] = *(rx_port_stats_ext +
668 bnxt_port_stats_ext_arr[i].offset);
670 len = min_t(u32, bp->fw_tx_stats_ext_size,
671 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
672 for (i = 0; i < len; i++, j++) {
673 buf[j] = *(tx_port_stats_ext +
674 bnxt_tx_port_stats_ext_arr[i].offset);
/* Per-priority stats: base_off plus the pri->cos mapping picks the
 * underlying CoS counter (mapping term elided in this listing).
 */
676 if (bp->pri2cos_valid) {
677 for (i = 0; i < 8; i++, j++) {
678 long n = bnxt_rx_bytes_pri_arr[i].base_off +
681 buf[j] = *(rx_port_stats_ext + n);
683 for (i = 0; i < 8; i++, j++) {
684 long n = bnxt_rx_pkts_pri_arr[i].base_off +
687 buf[j] = *(rx_port_stats_ext + n);
689 for (i = 0; i < 8; i++, j++) {
690 long n = bnxt_tx_bytes_pri_arr[i].base_off +
693 buf[j] = *(tx_port_stats_ext + n);
695 for (i = 0; i < 8; i++, j++) {
696 long n = bnxt_tx_pkts_pri_arr[i].base_off +
699 buf[j] = *(tx_port_stats_ext + n);
/*
 * bnxt_get_strings() - ethtool .get_strings handler.
 * Emits stat names in the same order bnxt_get_ethtool_stats() writes
 * values.  Per-ring names are prefixed "[<ring>]: "; each name occupies
 * one ETH_GSTRING_LEN slot in @buf.
 */
705 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
707 struct bnxt *bp = netdev_priv(dev);
708 static const char * const *str;
713 for (i = 0; i < bp->cp_nr_rings; i++) {
714 if (is_rx_ring(bp, i)) {
715 num_str = NUM_RING_RX_HW_STATS;
716 for (j = 0; j < num_str; j++) {
717 sprintf(buf, "[%d]: %s", i,
718 bnxt_ring_rx_stats_str[j]);
719 buf += ETH_GSTRING_LEN;
722 if (is_tx_ring(bp, i)) {
723 num_str = NUM_RING_TX_HW_STATS;
724 for (j = 0; j < num_str; j++) {
725 sprintf(buf, "[%d]: %s", i,
726 bnxt_ring_tx_stats_str[j]);
727 buf += ETH_GSTRING_LEN;
/* TPA names: v2 table or the legacy one (selector elided here). */
730 num_str = bnxt_get_num_tpa_ring_stats(bp);
731 if (!num_str || !is_rx_ring(bp, i))
735 str = bnxt_ring_tpa2_stats_str;
737 str = bnxt_ring_tpa_stats_str;
739 for (j = 0; j < num_str; j++) {
740 sprintf(buf, "[%d]: %s", i, str[j]);
741 buf += ETH_GSTRING_LEN;
744 if (is_rx_ring(bp, i)) {
745 num_str = NUM_RING_RX_SW_STATS;
746 for (j = 0; j < num_str; j++) {
747 sprintf(buf, "[%d]: %s", i,
748 bnxt_rx_sw_stats_str[j]);
749 buf += ETH_GSTRING_LEN;
752 num_str = NUM_RING_CMN_SW_STATS;
753 for (j = 0; j < num_str; j++) {
754 sprintf(buf, "[%d]: %s", i,
755 bnxt_cmn_sw_stats_str[j]);
756 buf += ETH_GSTRING_LEN;
/* Device-wide accumulated ring error counter names. */
759 for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
760 strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
761 buf += ETH_GSTRING_LEN;
764 if (bp->flags & BNXT_FLAG_PORT_STATS) {
765 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
766 strcpy(buf, bnxt_port_stats_arr[i].string);
767 buf += ETH_GSTRING_LEN;
770 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
/* Clamp to the firmware-reported ext counter counts, matching
 * bnxt_get_num_stats().
 */
773 len = min_t(u32, bp->fw_rx_stats_ext_size,
774 ARRAY_SIZE(bnxt_port_stats_ext_arr));
775 for (i = 0; i < len; i++) {
776 strcpy(buf, bnxt_port_stats_ext_arr[i].string);
777 buf += ETH_GSTRING_LEN;
779 len = min_t(u32, bp->fw_tx_stats_ext_size,
780 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
781 for (i = 0; i < len; i++) {
783 bnxt_tx_port_stats_ext_arr[i].string);
784 buf += ETH_GSTRING_LEN;
786 if (bp->pri2cos_valid) {
787 for (i = 0; i < 8; i++) {
789 bnxt_rx_bytes_pri_arr[i].string);
790 buf += ETH_GSTRING_LEN;
792 for (i = 0; i < 8; i++) {
794 bnxt_rx_pkts_pri_arr[i].string);
795 buf += ETH_GSTRING_LEN;
797 for (i = 0; i < 8; i++) {
799 bnxt_tx_bytes_pri_arr[i].string);
800 buf += ETH_GSTRING_LEN;
802 for (i = 0; i < 8; i++) {
804 bnxt_tx_pkts_pri_arr[i].string);
805 buf += ETH_GSTRING_LEN;
/* Self-test names come straight from the firmware-provided table. */
812 memcpy(buf, bp->test_info->string,
813 bp->num_tests * ETH_GSTRING_LEN);
816 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
/*
 * bnxt_get_ringparam() - ethtool .get_ringparam handler.
 * Reports ring size limits and current sizes.  With aggregation rings
 * enabled, jumbo/TPA buffers use a separate ring and header/data split
 * is in effect.
 */
822 static void bnxt_get_ringparam(struct net_device *dev,
823 struct ethtool_ringparam *ering,
824 struct kernel_ethtool_ringparam *kernel_ering,
825 struct netlink_ext_ack *extack)
827 struct bnxt *bp = netdev_priv(dev);
829 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
830 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
831 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
832 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
/* No aggregation rings: no jumbo ring, no data split. */
834 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
835 ering->rx_jumbo_max_pending = 0;
836 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
838 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
840 ering->rx_pending = bp->rx_ring_size;
841 ering->rx_jumbo_pending = bp->rx_agg_ring_size;
842 ering->tx_pending = bp->tx_ring_size;
/*
 * bnxt_set_ringparam() - ethtool .set_ringparam handler.
 * Validates the requested sizes, then (if the device is up) closes the
 * NIC, applies the new ring sizes and reopens it.
 */
845 static int bnxt_set_ringparam(struct net_device *dev,
846 struct ethtool_ringparam *ering,
847 struct kernel_ethtool_ringparam *kernel_ering,
848 struct netlink_ext_ack *extack)
850 struct bnxt *bp = netdev_priv(dev);
/* Reject sizes outside the supported descriptor-count range. */
852 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
853 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
854 (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
857 if (netif_running(dev))
858 bnxt_close_nic(bp, false, false);
860 bp->rx_ring_size = ering->rx_pending;
861 bp->tx_ring_size = ering->tx_pending;
/* Recompute derived ring parameters (agg ring size, etc.). */
862 bnxt_set_ring_params(bp);
864 if (netif_running(dev))
865 return bnxt_open_nic(bp, false, false);
/*
 * bnxt_get_channels() - ethtool .get_channels handler.
 * Reports maximum and current ring (channel) counts.  Combined maxima
 * are computed with shared rings assumed; rx/tx maxima with dedicated
 * rings.  TX maxima are additionally capped by the scheduler input
 * limit from the most recent resource query.
 */
870 static void bnxt_get_channels(struct net_device *dev,
871 struct ethtool_channels *channel)
873 struct bnxt *bp = netdev_priv(dev);
874 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
875 int max_rx_rings, max_tx_rings, tcs;
876 int max_tx_sch_inputs, tx_grps;
878 /* Get the most up-to-date max_tx_sch_inputs. */
879 if (netif_running(dev) && BNXT_NEW_RM(bp))
880 bnxt_hwrm_func_resc_qcaps(bp, false);
881 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
883 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
884 if (max_tx_sch_inputs)
885 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
/* Each TC (and XDP, when enabled) consumes a TX ring group. */
887 tcs = netdev_get_num_tc(dev);
888 tx_grps = max(tcs, 1);
889 if (bp->tx_nr_rings_xdp)
891 max_tx_rings /= tx_grps;
892 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
/* Re-query for the dedicated (non-shared) ring maxima. */
894 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
898 if (max_tx_sch_inputs)
899 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
904 channel->max_rx = max_rx_rings;
905 channel->max_tx = max_tx_rings;
906 channel->max_other = 0;
907 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
908 channel->combined_count = bp->rx_nr_rings;
/* Nitro A0 reserves one ring internally. */
909 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
910 channel->combined_count--;
912 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
913 channel->rx_count = bp->rx_nr_rings;
914 channel->tx_count = bp->tx_nr_rings_per_tc;
/*
 * bnxt_set_channels() - ethtool .set_channels handler.
 * Validates the request (combined vs. dedicated is mutually exclusive,
 * XDP requires combined mode), checks resource availability, then
 * reconfigures ring counts with a close/open cycle when running.
 */
919 static int bnxt_set_channels(struct net_device *dev,
920 struct ethtool_channels *channel)
922 struct bnxt *bp = netdev_priv(dev);
923 int req_tx_rings, req_rx_rings, tcs;
/* "other" channels are never supported. */
929 if (channel->other_count)
932 if (!channel->combined_count &&
933 (!channel->rx_count || !channel->tx_count))
/* Combined and dedicated ring counts are mutually exclusive. */
936 if (channel->combined_count &&
937 (channel->rx_count || channel->tx_count))
940 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
944 if (channel->combined_count)
947 tcs = netdev_get_num_tc(dev);
949 req_tx_rings = sh ? channel->combined_count : channel->tx_count;
950 req_rx_rings = sh ? channel->combined_count : channel->rx_count;
951 if (bp->tx_nr_rings_xdp) {
953 netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
/* One XDP TX ring per RX ring. */
956 tx_xdp = req_rx_rings;
958 rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
960 netdev_warn(dev, "Unable to allocate the requested rings\n");
/* A user-configured RSS table must be reset before the table size
 * can change with the new ring count.
 */
964 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
965 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
966 netif_is_rxfh_configured(dev)) {
967 netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
971 if (netif_running(dev)) {
973 /* TODO CHIMP_FW: Send message to all VF's
977 bnxt_close_nic(bp, true, false);
/* Commit the new ring counts and derived totals. */
981 bp->flags |= BNXT_FLAG_SHARED_RINGS;
982 bp->rx_nr_rings = channel->combined_count;
983 bp->tx_nr_rings_per_tc = channel->combined_count;
985 bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
986 bp->rx_nr_rings = channel->rx_count;
987 bp->tx_nr_rings_per_tc = channel->tx_count;
989 bp->tx_nr_rings_xdp = tx_xdp;
990 bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
992 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
994 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
995 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
996 tx_cp + bp->rx_nr_rings;
998 /* After changing number of rx channels, update NTUPLE feature. */
999 netdev_update_features(dev);
1000 if (netif_running(dev)) {
1001 rc = bnxt_open_nic(bp, true, false);
1002 if ((!rc) && BNXT_PF(bp)) {
1003 /* TODO CHIMP_FW: Send message to all VF's
/* Device down: just re-reserve resources for the new counts. */
1008 rc = bnxt_reserve_rings(bp, true);
1014 #ifdef CONFIG_RFS_ACCEL
/*
 * bnxt_grxclsrlall() - ETHTOOL_GRXCLSRLALL: list all ntuple filter IDs.
 * Walks every hash bucket and copies each filter's sw_id into
 * @rule_locs, stopping once cmd->rule_cnt entries are filled.
 */
1015 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
1020 cmd->data = bp->ntp_fltr_count;
1021 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
1022 struct hlist_head *head;
1023 struct bnxt_ntuple_filter *fltr;
1025 head = &bp->ntp_fltr_hash_tbl[i];
1027 hlist_for_each_entry_rcu(fltr, head, base.hash) {
1028 if (j == cmd->rule_cnt)
1030 rule_locs[j++] = fltr->base.sw_id;
1033 if (j == cmd->rule_cnt)
/*
 * bnxt_grxclsrule() - ETHTOOL_GRXCLSRULE: report one ntuple filter.
 * Looks up the filter whose sw_id matches fs->location and converts its
 * flow keys into the ethtool_rx_flow_spec representation.  All masks
 * are reported as exact-match (all ones).
 */
1040 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1042 struct ethtool_rx_flow_spec *fs =
1043 (struct ethtool_rx_flow_spec *)&cmd->fs;
1044 struct bnxt_ntuple_filter *fltr;
1045 struct flow_keys *fkeys;
1046 int i, rc = -EINVAL;
1048 if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
/* Linear search of the filter hash table for the requested sw_id. */
1051 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
1052 struct hlist_head *head;
1054 head = &bp->ntp_fltr_hash_tbl[i];
1056 hlist_for_each_entry_rcu(fltr, head, base.hash) {
1057 if (fltr->base.sw_id == fs->location)
/* IPv4 flows: fill the tcp_ip4_spec (shared layout for TCP/UDP). */
1065 fkeys = &fltr->fkeys;
1066 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
1067 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1068 fs->flow_type = TCP_V4_FLOW;
1069 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1070 fs->flow_type = UDP_V4_FLOW;
1074 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
1075 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
1077 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
1078 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
1080 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
1081 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
1083 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
1084 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
/* IPv6 flows: same idea with 128-bit addresses. */
1088 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1089 fs->flow_type = TCP_V6_FLOW;
1090 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1091 fs->flow_type = UDP_V6_FLOW;
1095 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
1096 fkeys->addrs.v6addrs.src;
1097 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
1098 fkeys->addrs.v6addrs.dst;
1099 for (i = 0; i < 4; i++) {
1100 fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
1101 fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
1103 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
1104 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
1106 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
1107 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
/* The destination ring is the filter's RX queue. */
1110 fs->ring_cookie = fltr->base.rxq;
1120 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1122 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1123 return RXH_IP_SRC | RXH_IP_DST;
1127 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1129 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1130 return RXH_IP_SRC | RXH_IP_DST;
/*
 * bnxt_grxfh() - ETHTOOL_GRXFH: report RSS hash fields per flow type.
 * TCP/UDP flow types report 4-tuple hashing when the corresponding
 * hash-type bit is enabled; other IP flow types fall back to the
 * 2-tuple IP src/dst setting (case labels elided in this listing).
 */
1134 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1137 switch (cmd->flow_type) {
1139 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1140 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1141 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1142 cmd->data |= get_ethtool_ipv4_rss(bp);
1145 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1146 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1147 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1150 case AH_ESP_V4_FLOW:
1154 cmd->data |= get_ethtool_ipv4_rss(bp);
1158 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1159 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1160 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1161 cmd->data |= get_ethtool_ipv6_rss(bp);
1164 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1165 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1166 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1169 case AH_ESP_V6_FLOW:
1173 cmd->data |= get_ethtool_ipv6_rss(bp);
/* The only hash-field combinations the hardware supports. */
1179 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1180 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
/*
 * bnxt_srxfh() - ETHTOOL_SRXFH: set RSS hash fields per flow type.
 * Only exact 4-tuple, 2-tuple, or none are accepted.  UDP 4-tuple
 * additionally requires the UDP RSS hardware capability.  The new
 * config is applied via a close/open cycle when the device is up.
 */
1182 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1184 u32 rss_hash_cfg = bp->rss_hash_cfg;
/* Classify the request into tuple count; anything else is invalid. */
1187 if (cmd->data == RXH_4TUPLE)
1189 else if (cmd->data == RXH_2TUPLE)
1191 else if (!cmd->data)
1196 if (cmd->flow_type == TCP_V4_FLOW) {
1197 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1199 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1200 } else if (cmd->flow_type == UDP_V4_FLOW) {
1201 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
1203 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1205 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1206 } else if (cmd->flow_type == TCP_V6_FLOW) {
1207 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1209 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1210 } else if (cmd->flow_type == UDP_V6_FLOW) {
1211 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
1213 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1215 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
/* 4-tuple hashing is only meaningful for TCP/UDP flow types. */
1216 } else if (tuple == 4) {
/* Remaining flow types toggle the plain IPv4/IPv6 2-tuple bits. */
1220 switch (cmd->flow_type) {
1224 case AH_ESP_V4_FLOW:
1229 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1231 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1237 case AH_ESP_V6_FLOW:
1242 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1244 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
/* Nothing changed: avoid an unnecessary reconfiguration. */
1248 if (bp->rss_hash_cfg == rss_hash_cfg)
1251 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
1252 bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
1253 bp->rss_hash_cfg = rss_hash_cfg;
1254 if (netif_running(bp->dev)) {
1255 bnxt_close_nic(bp, false, false);
1256 rc = bnxt_open_nic(bp, false, false);
/* bnxt_get_rxnfc() - ethtool .get_rxnfc hook.
 * Dispatches on @cmd->cmd: reports RX ring count, n-tuple filter count and
 * capacity, dumps all classification rules, fetches a single rule, or
 * returns the RSS hash fields (via bnxt_grxfh()).
 * NOTE(review): the switch statement, break/return lines and the rule_locs
 * parameter declaration are elided from this view.
 */
1261 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1264 struct bnxt *bp = netdev_priv(dev);
1268 #ifdef CONFIG_RFS_ACCEL
1269 case ETHTOOL_GRXRINGS:
1270 cmd->data = bp->rx_nr_rings;
1273 case ETHTOOL_GRXCLSRLCNT:
1274 cmd->rule_cnt = bp->ntp_fltr_count;
1275 cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1278 case ETHTOOL_GRXCLSRLALL:
1279 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1282 case ETHTOOL_GRXCLSRULE:
1283 rc = bnxt_grxclsrule(bp, cmd);
1288 rc = bnxt_grxfh(bp, cmd);
/* bnxt_set_rxnfc() - ethtool .set_rxnfc hook.
 * Forwards ETHTOOL_SRXFH requests to bnxt_srxfh(); other sub-commands are
 * handled in lines elided from this view.
 */
1299 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1301 struct bnxt *bp = netdev_priv(dev);
1306 rc = bnxt_srxfh(bp, cmd);
/* bnxt_get_rxfh_indir_size() - size of the RSS indirection table exposed to
 * ethtool.  P5+ chips use a table sized from the RX ring count rounded up to
 * BNXT_RSS_TABLE_ENTRIES_P5; older chips use the fixed HW_HASH_INDEX_SIZE.
 */
1316 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1318 struct bnxt *bp = netdev_priv(dev);
1320 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1321 return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
1322 return HW_HASH_INDEX_SIZE;
/* bnxt_get_rxfh_key_size() - fixed RSS hash key size reported to ethtool. */
1325 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1327 return HW_HASH_KEY_SIZE;
/* bnxt_get_rxfh() - ethtool .get_rxfh hook: report the RSS hash function
 * (always Toeplitz / ETH_RSS_HASH_TOP), the indirection table, and the hash
 * key from VNIC 0.  Both table and key copies are guarded against NULL
 * destination and unallocated driver state.
 */
1330 static int bnxt_get_rxfh(struct net_device *dev,
1331 struct ethtool_rxfh_param *rxfh)
1333 struct bnxt *bp = netdev_priv(dev);
1334 struct bnxt_vnic_info *vnic;
1337 rxfh->hfunc = ETH_RSS_HASH_TOP;
1342 vnic = &bp->vnic_info[0];
1343 if (rxfh->indir && bp->rss_indir_tbl) {
1344 tbl_size = bnxt_get_rxfh_indir_size(dev);
1345 for (i = 0; i < tbl_size; i++)
1346 rxfh->indir[i] = bp->rss_indir_tbl[i];
1349 if (rxfh->key && vnic->rss_hash_key)
1350 memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
/* bnxt_set_rxfh() - ethtool .set_rxfh hook: accept a new RSS indirection
 * table (hash function must remain Toeplitz).  The user-supplied table is
 * copied in, any trailing driver-side entries beyond the exposed size are
 * zeroed, and the NIC is restarted if running to apply the change.
 * NOTE(review): the key-update path and some return statements are elided
 * from this view.
 */
1355 static int bnxt_set_rxfh(struct net_device *dev,
1356 struct ethtool_rxfh_param *rxfh,
1357 struct netlink_ext_ack *extack)
1359 struct bnxt *bp = netdev_priv(dev);
/* Only the Toeplitz hash is supported */
1362 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
1369 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1371 for (i = 0; i < tbl_size; i++)
1372 bp->rss_indir_tbl[i] = rxfh->indir[i];
/* Zero the unused tail of the driver's full-size table */
1373 pad = bp->rss_indir_tbl_entries - tbl_size;
1375 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1378 if (netif_running(bp->dev)) {
1379 bnxt_close_nic(bp, false, false);
1380 rc = bnxt_open_nic(bp, false, false);
/* bnxt_get_drvinfo() - ethtool .get_drvinfo hook: report driver name,
 * firmware version string, PCI bus info, stats/test counts.  EEPROM and
 * register dump lengths are reported as 0 pending firmware support (see
 * TODOs below).
 */
1385 static void bnxt_get_drvinfo(struct net_device *dev,
1386 struct ethtool_drvinfo *info)
1388 struct bnxt *bp = netdev_priv(dev);
1390 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1391 strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1392 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1393 info->n_stats = bnxt_get_num_stats(bp);
1394 info->testinfo_len = bp->num_tests;
1395 /* TODO CHIMP_FW: eeprom dump details */
1396 info->eedump_len = 0;
1397 /* TODO CHIMP FW: reg dump details */
1398 info->regdump_len = 0;
/* bnxt_get_regs_len() - size of the register dump: the fixed PXP register
 * window plus, when the firmware supports it, the PCIe statistics block.
 */
1401 static int bnxt_get_regs_len(struct net_device *dev)
1403 struct bnxt *bp = netdev_priv(dev);
1409 reg_len = BNXT_PXP_REG_LEN;
1411 if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1412 reg_len += sizeof(struct pcie_ctx_hw_stats);
/* bnxt_get_regs() - ethtool .get_regs hook: dump PXP registers into @_p,
 * then, if supported, query PCIe stats from firmware via HWRM_PCIE_QSTATS
 * using a DMA slice and append them (converted to host endianness) after
 * the register block.  The HWRM request/slice lifetime follows the
 * hwrm_req_init/hold/drop pattern; drop on every exit path.
 */
1417 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1420 struct pcie_ctx_hw_stats *hw_pcie_stats;
1421 struct hwrm_pcie_qstats_input *req;
1422 struct bnxt *bp = netdev_priv(dev);
1423 dma_addr_t hw_pcie_stats_addr;
1427 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
1429 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
1432 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
/* DMA-able buffer carved out of the request for the firmware to fill */
1435 hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
1436 &hw_pcie_stats_addr);
1437 if (!hw_pcie_stats) {
1438 hwrm_req_drop(bp, req);
1443 hwrm_req_hold(bp, req); /* hold on to slice */
1444 req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
1445 req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
1446 rc = hwrm_req_send(bp, req);
1448 __le64 *src = (__le64 *)hw_pcie_stats;
1449 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
/* Copy out as u64s, converting from little-endian wire format */
1452 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
1453 dst[i] = le64_to_cpu(src[i]);
1455 hwrm_req_drop(bp, req);
/* bnxt_get_wol() - ethtool .get_wol hook: report Wake-on-LAN capability.
 * Only magic-packet wake is supported, and only when the device advertises
 * BNXT_FLAG_WOL_CAP.  The active wolopts line is partially elided here.
 */
1458 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1460 struct bnxt *bp = netdev_priv(dev);
1464 memset(&wol->sopass, 0, sizeof(wol->sopass));
1465 if (bp->flags & BNXT_FLAG_WOL_CAP) {
1466 wol->supported = WAKE_MAGIC;
1468 wol->wolopts = WAKE_MAGIC;
/* bnxt_set_wol() - ethtool .set_wol hook: enable or disable magic-packet
 * Wake-on-LAN by allocating or freeing the firmware WoL filter.  Any wake
 * option other than WAKE_MAGIC is rejected.  Error-return lines are elided
 * from this view.
 */
1472 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1474 struct bnxt *bp = netdev_priv(dev);
1476 if (wol->wolopts & ~WAKE_MAGIC)
1479 if (wol->wolopts & WAKE_MAGIC) {
1480 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1483 if (bnxt_hwrm_alloc_wol_fltr(bp))
1489 if (bnxt_hwrm_free_wol_fltr(bp))
/* _bnxt_fw_to_ethtool_adv_spds() - translate firmware speed and pause bits
 * into the legacy ethtool ADVERTISED_* bitmask.  Pause encoding follows the
 * standard ethtool convention: symmetric pause -> Pause; TX-only ->
 * Asym_Pause; RX-only -> Pause | Asym_Pause.
 */
1497 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1501 /* TODO: support 25GB, 40GB, 50GB with different cable type */
1502 /* set the advertised speeds */
1503 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1504 speed_mask |= ADVERTISED_100baseT_Full;
1505 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1506 speed_mask |= ADVERTISED_1000baseT_Full;
1507 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1508 speed_mask |= ADVERTISED_2500baseX_Full;
1509 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1510 speed_mask |= ADVERTISED_10000baseT_Full;
1511 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1512 speed_mask |= ADVERTISED_40000baseCR4_Full;
1514 if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1515 speed_mask |= ADVERTISED_Pause;
1516 else if (fw_pause & BNXT_LINK_PAUSE_TX)
1517 speed_mask |= ADVERTISED_Asym_Pause;
1518 else if (fw_pause & BNXT_LINK_PAUSE_RX)
1519 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
/* Generic media classes used to select an ethtool link-mode bit from
 * bnxt_link_modes[].  Several enumerators (CR, KR, SR, KX, X, TP, and the
 * __BNXT_MEDIA_END sentinel) are elided from this view.
 */
1524 enum bnxt_media_type {
1525 BNXT_MEDIA_UNKNOWN = 0,
1529 BNXT_MEDIA_LR_ER_FR,
/* Map firmware PORT_PHY_QCFG_RESP_PHY_TYPE_* values to generic media
 * classes.  Indexed by phy_type; unlisted entries default to 0
 * (BNXT_MEDIA_UNKNOWN).  Used by bnxt_get_media() when the firmware
 * media_type field alone is not decisive.
 */
1536 static const enum bnxt_media_type bnxt_phy_types[] = {
1537 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
1538 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
1539 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
1540 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
1541 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
1542 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
1543 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
1544 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
1545 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
1546 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
1547 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
1548 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
1549 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
1550 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
1551 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
1552 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
1553 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
1554 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
1555 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
1556 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
1557 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
1558 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
1559 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
1560 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
1561 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
1562 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
1563 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
1564 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
1565 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
1566 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
1567 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
1568 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
1569 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
1570 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
1571 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
1572 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
1573 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
1574 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
1575 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
1576 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
1577 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
1578 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
1579 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
1580 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
1581 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
1582 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
1583 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
1584 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
1585 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
1586 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
1587 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
1588 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
1589 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
1590 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
/* bnxt_get_media() - derive the generic media class for the current link.
 * Twisted-pair and DAC are decided directly from the firmware media_type;
 * otherwise fall back to the per-phy_type lookup table, with a bounds check
 * against the table size.
 */
1593 static enum bnxt_media_type
1594 bnxt_get_media(struct bnxt_link_info *link_info)
1596 switch (link_info->media_type) {
1597 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
1598 return BNXT_MEDIA_TP;
1599 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
1600 return BNXT_MEDIA_CR;
1602 if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
1603 return bnxt_phy_types[link_info->phy_type];
1604 return BNXT_MEDIA_UNKNOWN;
/* Compact speed indices used as the first dimension of bnxt_link_modes[]
 * and of the per-signal-mode speed mask tables below.  Index 0 means
 * unknown; __BNXT_LINK_SPEED_END is the array-sizing sentinel.
 */
1608 enum bnxt_link_speed_indices {
1609 BNXT_LINK_SPEED_UNKNOWN = 0,
1610 BNXT_LINK_SPEED_100MB_IDX,
1611 BNXT_LINK_SPEED_1GB_IDX,
1612 BNXT_LINK_SPEED_10GB_IDX,
1613 BNXT_LINK_SPEED_25GB_IDX,
1614 BNXT_LINK_SPEED_40GB_IDX,
1615 BNXT_LINK_SPEED_50GB_IDX,
1616 BNXT_LINK_SPEED_100GB_IDX,
1617 BNXT_LINK_SPEED_200GB_IDX,
1618 BNXT_LINK_SPEED_400GB_IDX,
1619 __BNXT_LINK_SPEED_END
/* bnxt_fw_speed_idx() - collapse a firmware link-speed value into a
 * bnxt_link_speed_indices entry.  NRZ and PAM4/PAM4-112 variants of the
 * same nominal speed map to the same index; unrecognized values map to
 * BNXT_LINK_SPEED_UNKNOWN.
 */
1622 static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
1625 case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
1626 case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
1627 case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
1628 case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
1629 case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
1630 case BNXT_LINK_SPEED_50GB:
1631 case BNXT_LINK_SPEED_50GB_PAM4:
1632 return BNXT_LINK_SPEED_50GB_IDX;
1633 case BNXT_LINK_SPEED_100GB:
1634 case BNXT_LINK_SPEED_100GB_PAM4:
1635 case BNXT_LINK_SPEED_100GB_PAM4_112:
1636 return BNXT_LINK_SPEED_100GB_IDX;
1637 case BNXT_LINK_SPEED_200GB:
1638 case BNXT_LINK_SPEED_200GB_PAM4:
1639 case BNXT_LINK_SPEED_200GB_PAM4_112:
1640 return BNXT_LINK_SPEED_200GB_IDX;
1641 case BNXT_LINK_SPEED_400GB:
1642 case BNXT_LINK_SPEED_400GB_PAM4:
1643 case BNXT_LINK_SPEED_400GB_PAM4_112:
1644 return BNXT_LINK_SPEED_400GB_IDX;
1645 default: return BNXT_LINK_SPEED_UNKNOWN;
/* bnxt_link_modes[speed_idx][sig_mode][media] -> ethtool link-mode bit.
 * Zero entries represent "no such mode"; since
 * ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 never applies to this hardware,
 * zero is safe as the unknown marker (see note in bnxt_get_link_mode()).
 */
1649 static const enum ethtool_link_mode_bit_indices
1650 bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
1651 [BNXT_LINK_SPEED_100MB_IDX] = {
1653 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1656 [BNXT_LINK_SPEED_1GB_IDX] = {
1658 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1659 /* historically baseT, but DAC is more correctly baseX */
1660 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1661 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1662 [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1663 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1666 [BNXT_LINK_SPEED_10GB_IDX] = {
1668 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1669 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1670 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1671 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1672 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1673 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1676 [BNXT_LINK_SPEED_25GB_IDX] = {
1678 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1679 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1680 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1683 [BNXT_LINK_SPEED_40GB_IDX] = {
1685 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1686 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1687 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1688 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1691 [BNXT_LINK_SPEED_50GB_IDX] = {
1692 [BNXT_SIG_MODE_NRZ] = {
1693 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1694 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1695 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1697 [BNXT_SIG_MODE_PAM4] = {
1698 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
1699 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
1700 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1701 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
1704 [BNXT_LINK_SPEED_100GB_IDX] = {
1705 [BNXT_SIG_MODE_NRZ] = {
1706 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1707 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1708 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1709 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1711 [BNXT_SIG_MODE_PAM4] = {
1712 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
1713 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
1714 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
1715 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
1717 [BNXT_SIG_MODE_PAM4_112] = {
1718 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
1719 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
1720 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
1721 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
1724 [BNXT_LINK_SPEED_200GB_IDX] = {
1725 [BNXT_SIG_MODE_PAM4] = {
1726 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1727 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1728 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1729 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1731 [BNXT_SIG_MODE_PAM4_112] = {
1732 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
1733 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
1734 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
1735 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
1738 [BNXT_LINK_SPEED_400GB_IDX] = {
1739 [BNXT_SIG_MODE_PAM4] = {
1740 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
1741 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
1742 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
1743 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
1745 [BNXT_SIG_MODE_PAM4_112] = {
1746 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
1747 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
1748 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
1749 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
1754 #define BNXT_LINK_MODE_UNKNOWN -1
/* bnxt_get_link_mode() - resolve the currently active (or, without autoneg,
 * the requested) ethtool link-mode bit.  Requires the PHY to report link up;
 * picks speed/signal-mode from the active state under autoneg or from the
 * requested settings otherwise, then looks up bnxt_link_modes[] by
 * [speed][sig_mode][media].  100M/1G full-duplex entries are downgraded to
 * the half-duplex bit when the link is not full duplex.
 */
1756 static enum ethtool_link_mode_bit_indices
1757 bnxt_get_link_mode(struct bnxt_link_info *link_info)
1759 enum ethtool_link_mode_bit_indices link_mode;
1760 enum bnxt_link_speed_indices speed;
1761 enum bnxt_media_type media;
1764 if (link_info->phy_link_status != BNXT_LINK_LINK)
1765 return BNXT_LINK_MODE_UNKNOWN;
1767 media = bnxt_get_media(link_info);
1768 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
1769 speed = bnxt_fw_speed_idx(link_info->link_speed);
1770 sig_mode = link_info->active_fec_sig_mode &
1771 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
1773 speed = bnxt_fw_speed_idx(link_info->req_link_speed);
1774 sig_mode = link_info->req_signal_mode;
1776 if (sig_mode >= BNXT_SIG_MODE_MAX)
1777 return BNXT_LINK_MODE_UNKNOWN;
1779 /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
1780 * link mode, but since no such devices exist, the zeroes in the
1781 * map can be conveniently used to represent unknown link modes.
1783 link_mode = bnxt_link_modes[speed][sig_mode][media];
1785 return BNXT_LINK_MODE_UNKNOWN;
1787 switch (link_mode) {
1788 case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
1789 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1790 link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
1792 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
1793 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1794 link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
/* bnxt_get_ethtool_modes() - populate non-speed link modes (Pause,
 * Asym_Pause, Autoneg) in the supported/advertising/lp_advertising masks.
 * Pause bits are suppressed when the device has BNXT_PHY_FL_NO_PAUSE, and
 * advertised pause bits are skipped entirely when flow-control autoneg is
 * off (the early return after the BNXT_AUTONEG_FLOW_CTRL check is elided
 * from this view).  A single set bit of PAUSE_BOTH implies asymmetric pause.
 */
1803 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
1804 struct ethtool_link_ksettings *lk_ksettings)
1806 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
1808 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
1809 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1810 lk_ksettings->link_modes.supported);
1811 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1812 lk_ksettings->link_modes.supported);
1815 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
1816 link_info->support_pam4_auto_speeds)
1817 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
1818 lk_ksettings->link_modes.supported);
1820 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1823 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
1824 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1825 lk_ksettings->link_modes.advertising);
1826 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
1827 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1828 lk_ksettings->link_modes.advertising);
1829 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
1830 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1831 lk_ksettings->link_modes.lp_advertising);
1832 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
1833 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1834 lk_ksettings->link_modes.lp_advertising);
/* Per-speed-index firmware speed-mask bits for NRZ signaling on devices
 * without the SPEEDS2 capability.  The sentinel entry sizes the array so
 * every legal speed index is in bounds.
 */
1837 static const u16 bnxt_nrz_speed_masks[] = {
1838 [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
1839 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
1840 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
1841 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
1842 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
1843 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
1844 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
1845 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
/* Per-speed-index firmware speed-mask bits for PAM4 signaling on devices
 * without the SPEEDS2 capability.
 */
1848 static const u16 bnxt_pam4_speed_masks[] = {
1849 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
1850 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
1851 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
1852 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
/* NRZ speed-mask bits in the newer SPEEDS2 firmware encoding (devices with
 * BNXT_PHY_FL_SPEEDS2).
 */
1855 static const u16 bnxt_nrz_speeds2_masks[] = {
1856 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
1857 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
1858 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
1859 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
1860 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
1861 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
1862 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
/* PAM4 speed-mask bits in the SPEEDS2 firmware encoding. */
1865 static const u16 bnxt_pam4_speeds2_masks[] = {
1866 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
1867 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
1868 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
1869 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
/* PAM4-112 speed-mask bits in the SPEEDS2 firmware encoding. */
1872 static const u16 bnxt_pam4_112_speeds2_masks[] = {
1873 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
1874 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
1875 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
/* bnxt_encoding_speed_idx() - reverse-map a single firmware speed-mask bit
 * (@speed_msk) to its speed index, for the given signal mode.  The lookup
 * table is chosen by @sig_mode and whether @phy_flags indicates the SPEEDS2
 * encoding.  Returns BNXT_LINK_SPEED_UNKNOWN when no table entry matches.
 */
1878 static enum bnxt_link_speed_indices
1879 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
1885 case BNXT_SIG_MODE_NRZ:
1886 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
1887 speeds = bnxt_nrz_speeds2_masks;
1888 len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
1890 speeds = bnxt_nrz_speed_masks;
1891 len = ARRAY_SIZE(bnxt_nrz_speed_masks);
1894 case BNXT_SIG_MODE_PAM4:
1895 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
1896 speeds = bnxt_pam4_speeds2_masks;
1897 len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
1899 speeds = bnxt_pam4_speed_masks;
1900 len = ARRAY_SIZE(bnxt_pam4_speed_masks);
1903 case BNXT_SIG_MODE_PAM4_112:
/* PAM4-112 exists only in the SPEEDS2 encoding */
1904 speeds = bnxt_pam4_112_speeds2_masks;
1905 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
1908 return BNXT_LINK_SPEED_UNKNOWN;
1911 for (idx = 0; idx < len; idx++) {
1912 if (speeds[idx] == speed_msk)
1916 return BNXT_LINK_SPEED_UNKNOWN;
1919 #define BNXT_FW_SPEED_MSK_BITS 16
/* __bnxt_get_ethtool_speeds() - expand a firmware speed bitmask into
 * ethtool link-mode bits for one media type and signal mode.  Each set bit
 * is mapped back to a speed index, then through bnxt_link_modes[]; bits
 * that yield no known mode are skipped (continue lines elided here).
 */
1922 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
1923 u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
1925 enum ethtool_link_mode_bit_indices link_mode;
1926 enum bnxt_link_speed_indices speed;
1929 for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
1930 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
1934 link_mode = bnxt_link_modes[speed][sig_mode][media];
1938 linkmode_set_bit(link_mode, et_mask);
/* bnxt_get_ethtool_speeds() - wrapper around __bnxt_get_ethtool_speeds()
 * that, when the media type is unknown, reports speeds for every media
 * class so the user still sees all possibilities.
 */
1943 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
1944 u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
1947 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
1952 /* list speeds for all media if unknown */
1953 for (media = 1; media < __BNXT_MEDIA_END; media++)
1954 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
/* bnxt_get_all_ethtool_support_speeds() - fill the "supported" link-mode
 * mask for all three signal modes.  On SPEEDS2 devices a single
 * support_speeds2 word encodes NRZ, PAM4 and PAM4-112; otherwise separate
 * support_speeds/support_pam4_speeds words are used and PAM4-112 stays 0.
 */
1959 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
1960 enum bnxt_media_type media,
1961 struct ethtool_link_ksettings *lk_ksettings)
1963 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
1964 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
1965 u16 phy_flags = bp->phy_flags;
1967 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
1968 sp_nrz = link_info->support_speeds2;
1969 sp_pam4 = link_info->support_speeds2;
1970 sp_pam4_112 = link_info->support_speeds2;
1972 sp_nrz = link_info->support_speeds;
1973 sp_pam4 = link_info->support_pam4_speeds;
1975 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
1976 lk_ksettings->link_modes.supported);
1977 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
1978 lk_ksettings->link_modes.supported);
1979 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
1980 phy_flags, lk_ksettings->link_modes.supported);
/* bnxt_get_all_ethtool_adv_speeds() - fill the "advertising" link-mode
 * mask, mirroring the support variant above: one shared advertising word
 * on SPEEDS2 devices, separate NRZ/PAM4 words otherwise.
 */
1984 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
1985 enum bnxt_media_type media,
1986 struct ethtool_link_ksettings *lk_ksettings)
1988 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
1989 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
1990 u16 phy_flags = bp->phy_flags;
1992 sp_nrz = link_info->advertising;
1993 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
1994 sp_pam4 = link_info->advertising;
1995 sp_pam4_112 = link_info->advertising;
1997 sp_pam4 = link_info->advertising_pam4;
1999 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2000 lk_ksettings->link_modes.advertising);
2001 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2002 lk_ksettings->link_modes.advertising);
2003 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2004 phy_flags, lk_ksettings->link_modes.advertising);
/* bnxt_get_all_ethtool_lp_speeds() - fill the link partner's advertised
 * link-mode mask from the firmware-reported LP NRZ and PAM4 speed words.
 */
2008 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2009 enum bnxt_media_type media,
2010 struct ethtool_link_ksettings *lk_ksettings)
2012 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2013 u16 phy_flags = bp->phy_flags;
2015 bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2016 BNXT_SIG_MODE_NRZ, phy_flags,
2017 lk_ksettings->link_modes.lp_advertising);
2018 bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2019 BNXT_SIG_MODE_PAM4, phy_flags,
2020 lk_ksettings->link_modes.lp_advertising);
/* bnxt_update_speed() - reconcile one firmware speed bit (@speed_msk in
 * @speeds) with whether the user requested the corresponding ethtool
 * @mode.  Modes on the installed media win unconditionally; for other
 * media, toggle the bit only once per firmware bit (tracked via @delta),
 * since multiple ethtool modes can map onto one firmware speed bit.
 */
2023 static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
2024 u16 speed_msk, const unsigned long *et_mask,
2025 enum ethtool_link_mode_bit_indices mode)
2027 bool mode_desired = linkmode_test_bit(mode, et_mask);
2032 /* enabled speeds for installed media should override */
2033 if (installed_media && mode_desired) {
2034 *speeds |= speed_msk;
2035 *delta |= speed_msk;
2039 /* many to one mapping, only allow one change per fw_speed bit */
2040 if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
2041 *speeds ^= speed_msk;
2042 *delta |= speed_msk;
/* bnxt_set_ethtool_speeds() - convert a user-supplied ethtool link-mode
 * mask (@et_mask) into the driver's firmware advertising words.  Selects
 * the SPEEDS2 or legacy mask tables based on phy_flags, then walks every
 * speed index and media class, applying bnxt_update_speed() for NRZ, PAM4
 * and (SPEEDS2 only, guard elided from this view) PAM4-112.
 */
2046 static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
2047 const unsigned long *et_mask)
2049 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2050 u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
2051 enum bnxt_media_type media = bnxt_get_media(link_info);
2052 u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
2053 u32 delta_pam4_112 = 0;
2058 adv = &link_info->advertising;
/* SPEEDS2 devices share one advertising word across all signal modes */
2059 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2060 adv_pam4 = &link_info->advertising;
2061 adv_pam4_112 = &link_info->advertising;
2062 sp_msks = bnxt_nrz_speeds2_masks;
2063 sp_pam4_msks = bnxt_pam4_speeds2_masks;
2064 sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
2066 adv_pam4 = &link_info->advertising_pam4;
2067 sp_msks = bnxt_nrz_speed_masks;
2068 sp_pam4_msks = bnxt_pam4_speed_masks;
2070 for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
2071 /* accept any legal media from user */
2072 for (m = 1; m < __BNXT_MEDIA_END; m++) {
2073 bnxt_update_speed(&delta_nrz, m == media,
2074 adv, sp_msks[i], et_mask,
2075 bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
2076 bnxt_update_speed(&delta_pam4, m == media,
2077 adv_pam4, sp_pam4_msks[i], et_mask,
2078 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
2082 bnxt_update_speed(&delta_pam4_112, m == media,
2083 adv_pam4_112, sp_pam4_112_msks[i], et_mask,
2084 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
/* bnxt_fw_to_ethtool_advertised_fec() - translate the firmware FEC config
 * into advertised ethtool FEC link modes.  FEC disabled or FEC autoneg off
 * advertises FEC_NONE only (early return elided); otherwise each enabled
 * encoding (Base-R, RS, LLRS) sets its corresponding bit.
 */
2089 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2090 struct ethtool_link_ksettings *lk_ksettings)
2092 u16 fec_cfg = link_info->fec_cfg;
2094 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2095 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2096 lk_ksettings->link_modes.advertising);
2099 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2100 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2101 lk_ksettings->link_modes.advertising);
2102 if (fec_cfg & BNXT_FEC_ENC_RS)
2103 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2104 lk_ksettings->link_modes.advertising);
2105 if (fec_cfg & BNXT_FEC_ENC_LLRS)
2106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2107 lk_ksettings->link_modes.advertising);
/* bnxt_fw_to_ethtool_support_fec() - translate the firmware FEC capability
 * bits into supported ethtool FEC link modes (NONE, Base-R, RS, LLRS),
 * parallel to the advertised-FEC translator above but using the *_CAP bits.
 */
2110 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2111 struct ethtool_link_ksettings *lk_ksettings)
2113 u16 fec_cfg = link_info->fec_cfg;
2115 if (fec_cfg & BNXT_FEC_NONE) {
2116 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2117 lk_ksettings->link_modes.supported);
2120 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2121 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2122 lk_ksettings->link_modes.supported);
2123 if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2124 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2125 lk_ksettings->link_modes.supported);
2126 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2127 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2128 lk_ksettings->link_modes.supported);
/* bnxt_fw_to_ethtool_speed() - map a firmware link-speed value to the
 * ethtool SPEED_* constant.  NRZ/PAM4 variants of a speed return the same
 * value; unknown inputs yield SPEED_UNKNOWN.  Return lines for the lower
 * speeds are elided from this view.
 */
2131 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2133 switch (fw_link_speed) {
2134 case BNXT_LINK_SPEED_100MB:
2136 case BNXT_LINK_SPEED_1GB:
2138 case BNXT_LINK_SPEED_2_5GB:
2140 case BNXT_LINK_SPEED_10GB:
2142 case BNXT_LINK_SPEED_20GB:
2144 case BNXT_LINK_SPEED_25GB:
2146 case BNXT_LINK_SPEED_40GB:
2148 case BNXT_LINK_SPEED_50GB:
2149 case BNXT_LINK_SPEED_50GB_PAM4:
2151 case BNXT_LINK_SPEED_100GB:
2152 case BNXT_LINK_SPEED_100GB_PAM4:
2153 case BNXT_LINK_SPEED_100GB_PAM4_112:
2154 return SPEED_100000;
2155 case BNXT_LINK_SPEED_200GB:
2156 case BNXT_LINK_SPEED_200GB_PAM4:
2157 case BNXT_LINK_SPEED_200GB_PAM4_112:
2158 return SPEED_200000;
2159 case BNXT_LINK_SPEED_400GB:
2160 case BNXT_LINK_SPEED_400GB_PAM4:
2161 case BNXT_LINK_SPEED_400GB_PAM4_112:
2162 return SPEED_400000;
2164 return SPEED_UNKNOWN;
/* bnxt_get_default_speeds() - fill base speed/duplex (and active lane
 * count) when the usual link-mode lookup could not.  Uses the live link
 * state when up; with autoneg disabled, falls back to the user-requested
 * forced speed/duplex.  Otherwise the fields keep their UNKNOWN defaults.
 */
2168 static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2169 struct bnxt_link_info *link_info)
2171 struct ethtool_link_settings *base = &lk_ksettings->base;
2173 if (link_info->link_state == BNXT_LINK_STATE_UP) {
2174 base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2175 base->duplex = DUPLEX_HALF;
2176 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2177 base->duplex = DUPLEX_FULL;
2178 lk_ksettings->lanes = link_info->active_lanes;
2179 } else if (!link_info->autoneg) {
2180 base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2181 base->duplex = DUPLEX_HALF;
2182 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2183 base->duplex = DUPLEX_FULL;
/* bnxt_get_link_ksettings() - ethtool .get_link_ksettings hook.
 * Under bp->link_lock, assembles the full link report: supported and
 * advertised modes (pause/autoneg bits, speeds per media, FEC), the active
 * link mode (or default speed/duplex when unknown), autoneg state,
 * link-partner modes when link is up, and the port type (TP / DA / FIBRE)
 * with matching TP/FIBRE mode bits.
 */
2187 static int bnxt_get_link_ksettings(struct net_device *dev,
2188 struct ethtool_link_ksettings *lk_ksettings)
2190 struct ethtool_link_settings *base = &lk_ksettings->base;
2191 enum ethtool_link_mode_bit_indices link_mode;
2192 struct bnxt *bp = netdev_priv(dev);
2193 struct bnxt_link_info *link_info;
2194 enum bnxt_media_type media;
2196 ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
2197 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
2198 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
2199 base->duplex = DUPLEX_UNKNOWN;
2200 base->speed = SPEED_UNKNOWN;
2201 link_info = &bp->link_info;
/* link_lock serializes against firmware link-state updates */
2203 mutex_lock(&bp->link_lock);
2204 bnxt_get_ethtool_modes(link_info, lk_ksettings);
2205 media = bnxt_get_media(link_info);
2206 bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
2207 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
2208 link_mode = bnxt_get_link_mode(link_info);
2209 if (link_mode != BNXT_LINK_MODE_UNKNOWN)
2210 ethtool_params_from_link_mode(lk_ksettings, link_mode);
2212 bnxt_get_default_speeds(lk_ksettings, link_info);
2214 if (link_info->autoneg) {
2215 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
2216 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2217 lk_ksettings->link_modes.advertising);
2218 base->autoneg = AUTONEG_ENABLE;
2219 bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
/* Link-partner info is only valid while the PHY reports link */
2220 if (link_info->phy_link_status == BNXT_LINK_LINK)
2221 bnxt_get_all_ethtool_lp_speeds(link_info, media,
2224 base->autoneg = AUTONEG_DISABLE;
2227 base->port = PORT_NONE;
2228 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
2229 base->port = PORT_TP;
2230 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2231 lk_ksettings->link_modes.supported);
2232 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2233 lk_ksettings->link_modes.advertising);
2235 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2236 lk_ksettings->link_modes.supported);
2237 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2238 lk_ksettings->link_modes.advertising);
2240 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
2241 base->port = PORT_DA;
2243 base->port = PORT_FIBRE;
2245 base->phy_address = link_info->phy_addr;
2246 mutex_unlock(&bp->link_lock);
/* bnxt_force_link_speed() - request a forced (autoneg-off) link speed.
 * Translates the ethtool speed and optional lane count into a firmware
 * speed code plus signalling mode (NRZ by default; PAM4 / PAM4-112 where
 * the port's support masks allow).  The caller (bnxt_set_link_ksettings)
 * treats a request identical to the current forced settings as -EALREADY.
 */
bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_pam4_spds = link_info->support_pam4_speeds;
	u16 support_spds2 = link_info->support_speeds2;
	u16 support_spds = link_info->support_speeds;
	u8 sig_mode = BNXT_SIG_MODE_NRZ;	/* default signalling mode */
	u32 lanes_needed = 1;			/* default: single lane */

	switch (ethtool_speed) {
	if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
	/* 1G/10G: either the legacy or the speeds2 support mask qualifies */
	if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
	    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
	if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
	    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
	if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
	    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
	    (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	/* 50G: prefer NRZ when supported, else fall back to PAM4 */
	if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
	     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
	} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
		fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
		sig_mode = BNXT_SIG_MODE_PAM4;
	} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
		fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
		sig_mode = BNXT_SIG_MODE_PAM4;
	/* 100G: NRZ unless the caller asked for 1 or 2 lanes, which implies
	 * PAM4-56 (2 lanes) or PAM4-112 (1 lane) signalling instead.
	 */
	if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
	     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
	    lanes != 2 && lanes != 1) {
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
	} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
		fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
		sig_mode = BNXT_SIG_MODE_PAM4;
	} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
		fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
		sig_mode = BNXT_SIG_MODE_PAM4;
	} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
		fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
		sig_mode = BNXT_SIG_MODE_PAM4_112;
	/* 200G and 400G exist only with PAM4 family signalling */
	if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
		fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
		sig_mode = BNXT_SIG_MODE_PAM4;
	} else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
		fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
		sig_mode = BNXT_SIG_MODE_PAM4;
	} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
		fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
		sig_mode = BNXT_SIG_MODE_PAM4_112;
	if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
		fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
		sig_mode = BNXT_SIG_MODE_PAM4;
	} else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
		fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
		sig_mode = BNXT_SIG_MODE_PAM4_112;
	netdev_err(dev, "unsupported speed!\n");
	/* lanes == 0 means "no preference"; otherwise it must match the
	 * lane count implied by the chosen speed/signalling combination.
	 */
	if (lanes && lanes != lanes_needed) {
		netdev_err(dev, "unsupported number of lanes for speed\n");
	/* Already forced to exactly this speed/mode -- nothing to do
	 * (caller checks for -EALREADY).
	 */
	if (link_info->req_link_speed == fw_speed &&
	    link_info->req_signal_mode == sig_mode &&
	    link_info->autoneg == 0)
	link_info->req_link_speed = fw_speed;
	link_info->req_signal_mode = sig_mode;
	link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
	link_info->autoneg = 0;		/* forced mode: autoneg off */
	link_info->advertising = 0;
	link_info->advertising_pam4 = 0;
2389 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
2391 u16 fw_speed_mask = 0;
2393 /* only support autoneg at speed 100, 1000, and 10000 */
2394 if (advertising & (ADVERTISED_100baseT_Full |
2395 ADVERTISED_100baseT_Half)) {
2396 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
2398 if (advertising & (ADVERTISED_1000baseT_Full |
2399 ADVERTISED_1000baseT_Half)) {
2400 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
2402 if (advertising & ADVERTISED_10000baseT_Full)
2403 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
2405 if (advertising & ADVERTISED_40000baseCR4_Full)
2406 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
2408 return fw_speed_mask;
/* ethtool ->set_link_ksettings: configure autoneg + advertised speeds,
 * or a forced speed/lane count, under bp->link_lock.  Firmware is only
 * told about the change when the interface is running.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *lk_ksettings)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u32 speed, lanes = 0;

	/* PHY configuration may be disallowed (e.g. for VFs) */
	if (!BNXT_PHY_CFG_ABLE(bp))

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		bnxt_set_ethtool_speeds(link_info,
					lk_ksettings->link_modes.advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* Nothing advertised: fall back to everything the port
		 * supports for autoneg.
		 */
		if (!link_info->advertising && !link_info->advertising_pam4) {
			link_info->advertising = link_info->support_auto_speeds;
			link_info->advertising_pam4 =
				link_info->support_pam4_auto_speeds;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		u8 phy_type = link_info->phy_type;

		/* Forced speed is not allowed on BASE-T media */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			goto set_setting_exit;
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			goto set_setting_exit;
		speed = base->speed;
		lanes = lk_ksettings->lanes;
		rc = bnxt_force_link_speed(dev, speed, lanes);
		/* -EALREADY: requested forced speed already in effect */
		if (rc == -EALREADY)
		goto set_setting_exit;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

	mutex_unlock(&bp->link_lock);
/* ethtool ->get_fecparam: report configured FEC encodings (from
 * link_info->fec_cfg) and the FEC mode currently active on the wire
 * (from the firmware's active_fec_sig_mode field).
 */
static int bnxt_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	active_fec = link_info->active_fec_sig_mode &
		     PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
	/* Port does not support FEC at all */
	if (fec_cfg & BNXT_FEC_NONE) {
		fec->fec = ETHTOOL_FEC_NONE;
		fec->active_fec = ETHTOOL_FEC_NONE;
	if (fec_cfg & BNXT_FEC_AUTONEG)
		fec->fec |= ETHTOOL_FEC_AUTO;
	if (fec_cfg & BNXT_FEC_ENC_BASE_R)
		fec->fec |= ETHTOOL_FEC_BASER;
	if (fec_cfg & BNXT_FEC_ENC_RS)
		fec->fec |= ETHTOOL_FEC_RS;
	if (fec_cfg & BNXT_FEC_ENC_LLRS)
		fec->fec |= ETHTOOL_FEC_LLRS;

	/* Map firmware active-FEC code onto the ethtool FEC bit */
	switch (active_fec) {
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_BASER;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_RS;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_LLRS;
	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
		fec->active_fec |= ETHTOOL_FEC_OFF;
/* ethtool ->get_fec_stats: copy FEC counters out of the extended RX
 * port statistics.  VFs and devices without extended port stats report
 * nothing; older firmware (legacy-sized stats) lacks the block counters.
 */
static void bnxt_get_fec_stats(struct net_device *dev,
			       struct ethtool_fec_stats *fec_stats)
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))

	rx = bp->rx_port_stats_ext.sw_stats;
	fec_stats->corrected_bits.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));

	/* Block counters only exist beyond the legacy stats layout */
	if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)

	fec_stats->corrected_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
	fec_stats->uncorrectable_blocks.total =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
/* Translate a forced ethtool FEC selection into PORT_PHY_CFG flags.
 * Exactly one encoding is chosen, in priority order BASER > RS > LLRS,
 * on top of disabling FEC autonegotiation.
 */
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
	u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;

	if (fec & ETHTOOL_FEC_BASER)
		fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
	else if (fec & ETHTOOL_FEC_RS)
		fw_fec |= BNXT_FEC_RS_ON(link_info);
	else if (fec & ETHTOOL_FEC_LLRS)
		fw_fec |= BNXT_FEC_LLRS_ON;
/* ethtool ->set_fecparam: validate the requested FEC mode against the
 * port's capabilities, then push the new config to firmware with a PHY
 * reset and refresh the cached link state.
 */
static int bnxt_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
	struct hwrm_port_phy_cfg_input *req;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info;
	u32 new_cfg, fec = fecparam->fec;

	link_info = &bp->link_info;
	fec_cfg = link_info->fec_cfg;
	/* FEC not supported on this port */
	if (fec_cfg & BNXT_FEC_NONE)

	if (fec & ETHTOOL_FEC_OFF) {
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
			  BNXT_FEC_ALL_OFF(link_info);
	/* Reject any requested encoding the port cannot do */
	if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
	    ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
	    ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
	    ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))

	if (fec & ETHTOOL_FEC_AUTO) {
		/* FEC autoneg requires link autoneg to be enabled */
		if (!link_info->autoneg)
		new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
		new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_req_send(bp, req);
	/* update current settings */
	mutex_lock(&bp->link_lock);
	bnxt_update_link(bp, false);
	mutex_unlock(&bp->link_lock);
/* ethtool ->get_pauseparam: report the *requested* flow-control
 * configuration (req_flow_ctrl), not the negotiated result.
 */
static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
/* ethtool ->get_pause_stats: pause frame counters come from the port
 * statistics block; unavailable on VFs or without port stats support.
 */
static void bnxt_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *epstat)
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))

	rx = bp->port_stats.sw_stats;
	/* TX counters live at a fixed byte offset within the same block */
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
	epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
/* ethtool ->set_pauseparam: switch between autonegotiated and forced
 * flow control under bp->link_lock.  Pause autoneg requires speed
 * autoneg to already be enabled.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	/* Not allowed when PHY cfg is restricted or pause is unsupported */
	if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))

	mutex_lock(&bp->link_lock);
	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);

	mutex_unlock(&bp->link_lock);
/* ethtool ->get_link: report cached link-up state. */
static u32 bnxt_get_link(struct net_device *dev)
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return BNXT_LINK_IS_UP(bp);
/* Query NVM device info from firmware and copy the whole response into
 * the caller-supplied buffer.
 */
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
			       struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
	struct hwrm_nvm_get_dev_info_output *resp;
	struct hwrm_nvm_get_dev_info_input *req;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);

	/* hold keeps the response buffer valid until hwrm_req_drop() */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	memcpy(nvm_dev_info, resp, sizeof(*resp));
	hwrm_req_drop(bp, req);
/* Log the common "no admin privileges" error for flash/reset failures. */
static void bnxt_print_admin_err(struct bnxt *bp)
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
2709 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2710 u16 ext, u16 *index, u32 *item_length,
/* Write (or create, when data is NULL) an NVM directory entry via
 * HWRM_NVM_WRITE.  Payload, when present, is staged in a DMA slice of
 * the request.  Uses the maximum HWRM timeout since NVM writes are slow.
 */
int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
		     u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
		     u32 dir_item_len, const u8 *data,
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_write_input *req;

	rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);

	if (data_len && data) {
		dma_addr_t dma_handle;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
			hwrm_req_drop(bp, req);

		req->dir_data_length = cpu_to_le32(data_len);

		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);

	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
	req->dir_type = cpu_to_le16(dir_type);
	req->dir_ordinal = cpu_to_le16(dir_ordinal);
	req->dir_ext = cpu_to_le16(dir_ext);
	req->dir_attr = cpu_to_le16(dir_attr);
	req->dir_item_length = cpu_to_le32(dir_item_len);
	rc = hwrm_req_send(bp, req);
		/* permission failure: explain in the log */
		bnxt_print_admin_err(bp);
/* Issue HWRM_FW_RESET for the given embedded processor.  AP resets are
 * sent silently (failures expected on some parts); others log admin
 * errors.  Denied when a remote driver has inhibited resets.
 */
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
			     u8 self_reset, u8 flags)
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input *req;

	if (!bnxt_hwrm_reset_permitted(bp)) {
		netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");

	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);

	req->embedded_proc_type = proc_type;
	req->selfrst_status = self_reset;

	if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
		rc = hwrm_req_send_silent(bp, req);
		rc = hwrm_req_send(bp, req);
			bnxt_print_admin_err(bp);
/* Reset the embedded processor that corresponds to the NVM directory
 * type that was just flashed, so the new image takes effect.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       enum bnxt_nvm_directory_type dir_type)
	u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
	u8 proc_type, flags = 0;

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;

	return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
/* Whole-chip firmware reset; performed gracefully when the firmware
 * advertises hot-reset capability.
 */
static int bnxt_firmware_reset_chip(struct net_device *dev)
	struct bnxt *bp = netdev_priv(dev);

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;

	return bnxt_hwrm_firmware_reset(dev,
					FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
/* Reset only the application processor (no self-reset of the chip). */
static int bnxt_firmware_reset_ap(struct net_device *dev)
	return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
					FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
/* Validate an APE-binary-format firmware image (header signature, code
 * type, device family, trailing CRC32) for the given directory type,
 * flash it, then reset the matching embedded processor.
 */
static int bnxt_flash_firmware(struct net_device *dev,
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Directory type determines the expected code_type in the header */
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		netdev_err(dev, "Unsupported directory entry type: %u\n",

	/* Image must at least contain a full header */
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);
/* Validate a microcode image by its trailer (signature, directory type,
 * trailer length, trailing CRC32) and flash it.  Unlike APE binaries,
 * microcode carries its metadata at the end of the file.
 */
static int bnxt_flash_microcode(struct net_device *dev,
	struct bnxt_ucode_trailer *trailer;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
	if (le16_to_cpu(trailer->trailer_length) <
	    sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		"CRC32 (%08lX) does not match calculated: %08lX\n",
		(unsigned long)stored_crc,
		(unsigned long)calculated_crc);
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, 0, fw_data, fw_size);
2974 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2977 case BNX_DIR_TYPE_CHIMP_PATCH:
2978 case BNX_DIR_TYPE_BOOTCODE:
2979 case BNX_DIR_TYPE_BOOTCODE_2:
2980 case BNX_DIR_TYPE_APE_FW:
2981 case BNX_DIR_TYPE_APE_PATCH:
2982 case BNX_DIR_TYPE_KONG_FW:
2983 case BNX_DIR_TYPE_KONG_PATCH:
2984 case BNX_DIR_TYPE_BONO_FW:
2985 case BNX_DIR_TYPE_BONO_PATCH:
2992 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
2995 case BNX_DIR_TYPE_AVS:
2996 case BNX_DIR_TYPE_EXP_ROM_MBA:
2997 case BNX_DIR_TYPE_PCIE:
2998 case BNX_DIR_TYPE_TSCF_UCODE:
2999 case BNX_DIR_TYPE_EXT_PHY:
3000 case BNX_DIR_TYPE_CCM:
3001 case BNX_DIR_TYPE_ISCSI_BOOT:
3002 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3003 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3010 static bool bnxt_dir_type_is_executable(u16 dir_type)
3012 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3013 bnxt_dir_type_is_other_exec_format(dir_type);
/* Load a firmware file via request_firmware() and flash it with the
 * validator that matches its directory type (APE binary, microcode, or
 * raw NVM data).
 */
static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 const char *filename)
	const struct firmware *fw;

	rc = request_firmware(&fw, filename, &dev->dev);
		netdev_err(dev, "Error %d requesting firmware file: %s\n",

	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, 0, fw->data, fw->size);
	release_firmware(fw);
3040 #define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
3041 #define MSG_INVALID_PKG "PKG install error : Invalid package"
3042 #define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
3043 #define MSG_INVALID_DEV "PKG install error : Invalid device"
3044 #define MSG_INTERNAL_ERR "PKG install error : Internal error"
3045 #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
3046 #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
3047 #define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
3048 #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
3049 #define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
/* Map the firmware's NVM_INSTALL_UPDATE result code onto one of the
 * user-visible MSG_* strings (reported via extack and the kernel log)
 * and a standard errno for the caller.
 */
static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
				    struct netlink_ext_ack *extack)
	/* data-integrity class failures */
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
	/* malformed / unparseable package */
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
	case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
	case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
	case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
	case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
	/* package is valid but not for this device */
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
	case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
		BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
3094 #define BNXT_PKG_DMA_SIZE 0x40000
3095 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
3096 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
/* Ensure the NVM UPDATE directory entry is large enough for the new
 * package; re-create it (rounded up to 4K) when the incoming image is
 * bigger than the current entry.
 */
static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
				    struct netlink_ext_ack *extack)
	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
		BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);

	if (fw_size > item_len) {
		/* re-create the UPDATE entry with a larger size */
		rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
				      BNX_DIR_ORDINAL_FIRST, 0, 1,
				      round_up(fw_size, 4096), NULL, 0);
			BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
/* Flash a complete firmware package: stream the image into the NVM
 * UPDATE area in DMA-sized pieces (HWRM_NVM_MODIFY), then ask firmware
 * to install it (HWRM_NVM_INSTALL_UPDATE).  On a fragmentation error
 * one defragment-and-retry pass is attempted.  Errors are translated
 * to user-visible messages via extack.
 */
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
				   u32 install_type, struct netlink_ext_ack *extack)
	struct hwrm_nvm_install_update_input *install;
	struct hwrm_nvm_install_update_output *resp;
	struct hwrm_nvm_modify_input *modify;
	struct bnxt *bp = netdev_priv(dev);
	bool defrag_attempted = false;
	dma_addr_t dma_handle;

	/* resize before flashing larger image than available space */
	rc = bnxt_resize_update_entry(dev, fw->size, extack);

	bnxt_hwrm_fw_set_time(bp);

	rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);

	/* Try allocating a large DMA buffer first. Older fw will
	 * cause excessive NVRAM erases when using small blocks.
	 */
	modify_len = roundup_pow_of_two(fw->size);
	modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
	kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
	if (!kmem && modify_len > PAGE_SIZE)
		hwrm_req_drop(bp, modify);

	rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
		hwrm_req_drop(bp, modify);

	/* NVM operations can be slow; allow the maximum timeout */
	hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
	hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);

	hwrm_req_hold(bp, modify);
	modify->host_src_addr = cpu_to_le64(dma_handle);

	resp = hwrm_req_hold(bp, install);
	/* install_type may be encoded in the upper 16 bits */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	install->install_type = cpu_to_le32(install_type);

		u32 copied = 0, len = modify_len;

		rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
					  BNX_DIR_ORDINAL_FIRST,
					  &index, &item_len, NULL);
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
		if (fw->size > item_len) {
			BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);

		modify->dir_idx = cpu_to_le16(index);

		/* batched transfer: mark all but the last piece MORE */
		if (fw->size > modify_len)
			modify->flags = BNXT_NVM_MORE_FLAG;
		while (copied < fw->size) {
			u32 balance = fw->size - copied;

			if (balance <= modify_len) {
				modify->flags |= BNXT_NVM_LAST_FLAG;
			memcpy(kmem, fw->data + copied, len);
			modify->len = cpu_to_le32(len);
			modify->offset = cpu_to_le32(copied);
			rc = hwrm_req_send(bp, modify);

		rc = hwrm_req_send_silent(bp, install);

		if (defrag_attempted) {
			/* We have tried to defragment already in the previous
			 * iteration. Return with the result for INSTALL_UPDATE
			 */
		cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
			BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
		case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
			/* retry install with defragmentation allowed */
				cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			rc = hwrm_req_send_silent(bp, install);
				cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;

				if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
					/* FW has cleared NVM area, driver will create
					 * UPDATE directory and try the flash again
					 */
					defrag_attempted = true;
					rc = bnxt_flash_nvram(bp->dev,
							      BNX_DIR_TYPE_UPDATE,
							      BNX_DIR_ORDINAL_FIRST,
							      0, 0, item_len, NULL, 0);
			BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
	} while (defrag_attempted && !rc);

	hwrm_req_drop(bp, modify);
	hwrm_req_drop(bp, install);
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = nvm_update_err_to_stderr(dev, resp->result, extack);
		bnxt_print_admin_err(bp);
/* request_firmware() wrapper around bnxt_flash_package_from_fw_obj(). */
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
					u32 install_type, struct netlink_ext_ack *extack)
	const struct firmware *fw;

	rc = request_firmware(&fw, filename, &dev->dev);
		netdev_err(dev, "PKG error %d requesting file: %s\n",

	rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);

	release_firmware(fw);
/* ethtool ->flash_device: PF only.  ALL_REGIONS (or a region value
 * above 0xffff, treated as an install_type) flashes a whole package;
 * otherwise the region is a single NVM directory type.
 */
static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region, NULL);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
/* Query the NVM directory: number of entries and per-entry length. */
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
	struct hwrm_nvm_get_dir_info_output *output;
	struct hwrm_nvm_get_dir_info_input *req;
	struct bnxt *bp = netdev_priv(dev);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);

	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	hwrm_req_drop(bp, req);
/* ethtool ->get_eeprom_len for the NVM-backed "EEPROM" interface. */
static int bnxt_get_eeprom_len(struct net_device *dev)
	struct bnxt *bp = netdev_priv(dev);

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
/* Read the NVM directory listing into the user buffer: a 2-byte header
 * (entry count, entry size) followed by the raw directory entries
 * fetched by DMA from firmware.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input *req;

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);

	if (!dir_entries || !entry_length)

	/* Insert 2 bytes of directory info (count and size of entries) */
	*data++ = dir_entries;
	*data++ = entry_length;
	/* pre-fill remainder in case firmware returns fewer bytes */
	memset(data, 0xff, len);

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);

	buflen = mul_u32_u32(dir_entries, entry_length);
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
		hwrm_req_drop(bp, req);
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);
/* Read 'length' bytes at 'offset' from NVM directory entry 'index'
 * into 'data', using a DMA slice as the firmware's destination buffer.
 */
int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			u32 length, u8 *data)
	struct bnxt *bp = netdev_priv(dev);
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input *req;

	rc = hwrm_req_init(bp, req, HWRM_NVM_READ);

	buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
		hwrm_req_drop(bp, req);

	req->host_dest_addr = cpu_to_le64(dma_handle);
	req->dir_idx = cpu_to_le16(index);
	req->offset = cpu_to_le32(offset);
	req->len = cpu_to_le32(length);

	hwrm_req_hold(bp, req); /* hold the slice */
	rc = hwrm_req_send(bp, req);
		memcpy(data, buf, length);
	hwrm_req_drop(bp, req);
/* Locate an NVM directory entry by type/ordinal/ext.  Any of index,
 * item_length, data_length may be NULL if the caller does not need it.
 * Sent silently since "not found" is an expected outcome.
 */
int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
			 u16 ext, u16 *index, u32 *item_length,
	struct hwrm_nvm_find_dir_entry_output *output;
	struct hwrm_nvm_find_dir_entry_input *req;
	struct bnxt *bp = netdev_priv(dev);

	rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);

	req->dir_type = cpu_to_le16(type);
	req->dir_ordinal = cpu_to_le16(ordinal);
	req->dir_ext = cpu_to_le16(ext);
	req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	output = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
			*index = le16_to_cpu(output->dir_idx);
			*item_length = le32_to_cpu(output->dir_item_length);
			*data_length = le32_to_cpu(output->dir_data_length);
	hwrm_req_drop(bp, req);
/* Scan the PKG_LOG text (tab-separated fields, newline-separated
 * records) for the requested field index and return a pointer into the
 * (mutated, NUL-terminated in place) buffer, or NULL if absent.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
	char *retval = NULL;

	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		/* walk fields within the current record */
		while (*p != 0 && *p != '\n') {
			while (*p != 0 && *p != '\t' && *p != '\n')
			if (field == desired_field)
/* Read the NVM PKG_LOG entry and extract the package version string
 * into 'ver' (only when it starts with a digit, i.e. looks valid).
 */
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
	struct bnxt *bp = netdev_priv(dev);

	rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				  &index, NULL, &pkglen);

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",

	rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
	/* only accept a version string that begins with a digit */
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		strscpy(ver, pkgver, size);
/* Append the NVM package version to bp->fw_ver_str for reporting
 * through ethtool drvinfo.
 */
3541 static void bnxt_get_pkgver(struct net_device *dev)
3543 struct bnxt *bp = netdev_priv(dev);
3544 char buf[FW_VER_STR_LEN];
3547 if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
3548 len = strlen(bp->fw_ver_str);
/* Append after the existing version text, leaving room for the NUL */
3549 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
/* ethtool -e handler.  offset == 0 returns the NVM directory; otherwise
 * the high byte of 'offset' selects a 1-based directory index and the
 * low 24 bits are the byte offset within that item.
 */
3554 static int bnxt_get_eeprom(struct net_device *dev,
3555 struct ethtool_eeprom *eeprom,
3561 if (eeprom->offset == 0) /* special offset value to get directory */
3562 return bnxt_get_nvram_directory(dev, eeprom->len, data);
3564 index = eeprom->offset >> 24;
3565 offset = eeprom->offset & 0xffffff;
3568 netdev_err(dev, "unsupported index value: %d\n", index);
/* Convert the 1-based user index to the firmware's 0-based index */
3572 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
/* Erase one NVM directory entry (0-based 'index') via
 * HWRM_NVM_ERASE_DIR_ENTRY.
 */
3575 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
3577 struct hwrm_nvm_erase_dir_entry_input *req;
3578 struct bnxt *bp = netdev_priv(dev);
3581 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
3585 req->dir_idx = cpu_to_le16(index);
3586 return hwrm_req_send(bp, req);
/* ethtool -E handler.  The 'magic' value encodes the operation:
 * a 0xffff type selects directory operations (erase), otherwise the
 * request creates/rewrites an NVM item of the given type, with the
 * ordinal and attributes packed into 'offset'.  Not permitted on VFs.
 */
3589 static int bnxt_set_eeprom(struct net_device *dev,
3590 struct ethtool_eeprom *eeprom,
3593 struct bnxt *bp = netdev_priv(dev);
3595 u16 type, ext, ordinal, attr;
3598 netdev_err(dev, "NVM write not supported from a virtual function\n");
3602 type = eeprom->magic >> 16;
3604 if (type == 0xffff) { /* special value for directory operations */
3605 index = eeprom->magic & 0xff;
3606 dir_op = eeprom->magic >> 8;
3610 case 0x0e: /* erase */
/* Sanity cross-check: offset must be the bitwise complement of magic */
3611 if (eeprom->offset != ~eeprom->magic)
3613 return bnxt_erase_nvram_directory(dev, index - 1);
3619 /* Create or re-write an NVM item: */
/* Executable images must go through the firmware-flash path instead */
3620 if (bnxt_dir_type_is_executable(type))
3622 ext = eeprom->magic & 0xffff;
3623 ordinal = eeprom->offset >> 16;
3624 attr = eeprom->offset & 0xffff;
3626 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
/* ethtool --set-eee handler.  Validates the requested EEE settings
 * against PHY capabilities (EEE requires autoneg; LPI timer must be
 * within the firmware-reported range), stores them in bp->eee and, if
 * the interface is up, pushes them to the firmware.  Serialized by
 * bp->link_lock.
 */
3630 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
3632 struct bnxt *bp = netdev_priv(dev);
3633 struct ethtool_eee *eee = &bp->eee;
3634 struct bnxt_link_info *link_info = &bp->link_info;
3638 if (!BNXT_PHY_CFG_ABLE(bp))
3641 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3644 mutex_lock(&bp->link_lock);
3645 advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
3646 if (!edata->eee_enabled)
/* EEE can only be negotiated; reject if speed autoneg is off */
3649 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
3650 netdev_warn(dev, "EEE requires autoneg\n");
3654 if (edata->tx_lpi_enabled) {
/* LPI timer must fall within the firmware-advertised [lo, hi] range */
3655 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
3656 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
3657 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
3658 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
3661 } else if (!bp->lpi_tmr_hi) {
/* No firmware range: keep the currently stored timer value */
3662 edata->tx_lpi_timer = eee->tx_lpi_timer;
3665 if (!edata->advertised) {
/* Default to everything supported that autoneg also advertises */
3666 edata->advertised = advertising & eee->supported;
3667 } else if (edata->advertised & ~advertising) {
3668 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
3669 edata->advertised, advertising);
3674 eee->advertised = edata->advertised;
3675 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
3676 eee->tx_lpi_timer = edata->tx_lpi_timer;
3678 eee->eee_enabled = edata->eee_enabled;
3680 if (netif_running(dev))
3681 rc = bnxt_hwrm_set_link_setting(bp, false, true);
3684 mutex_unlock(&bp->link_lock);
/* ethtool --show-eee handler.  Reports the cached EEE state; when EEE
 * is disabled, all fields except tx_lpi_timer are cleared so that the
 * timer value survives a disable/enable cycle.
 */
3688 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
3690 struct bnxt *bp = netdev_priv(dev);
3692 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3696 if (!bp->eee.eee_enabled) {
3697 /* Preserve tx_lpi_timer so that the last value will be used
3698 * by default when it is re-enabled.
3700 edata->advertised = 0;
3701 edata->tx_lpi_enabled = 0;
/* Link partner advertisement is only meaningful while EEE is active */
3704 if (!bp->eee.eee_active)
3705 edata->lp_advertised = 0;
/* Read 'data_length' bytes of SFP/QSFP module EEPROM into 'buf' via
 * HWRM_PORT_PHY_I2C_READ, chunking the transfer into
 * BNXT_MAX_PHY_I2C_RESP_SIZE pieces.  'i2c_addr' selects the device
 * (A0/A2 page), 'bank' selects an optional bank when supported.
 * Returns 0 on success or the first firmware error.
 */
3710 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
3711 u16 page_number, u8 bank,
3712 u16 start_addr, u16 data_length,
3715 struct hwrm_port_phy_i2c_read_output *output;
3716 struct hwrm_port_phy_i2c_read_input *req;
3717 int rc, byte_offset = 0;
3719 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
3723 output = hwrm_req_hold(bp, req);
3724 req->i2c_slave_addr = i2c_addr;
3725 req->page_number = cpu_to_le16(page_number);
3726 req->port_id = cpu_to_le16(bp->pf.port_id);
/* Loop over the request in maximum-sized chunks */
3730 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
3731 data_length -= xfer_size;
3732 req->page_offset = cpu_to_le16(start_addr + byte_offset);
3733 req->data_length = xfer_size;
/* Enable the page-offset/bank fields only when they are non-default */
3735 cpu_to_le32((start_addr + byte_offset ?
3736 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
3739 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
3741 rc = hwrm_req_send(bp, req);
3743 memcpy(buf + byte_offset, output->data, xfer_size);
3744 byte_offset += xfer_size;
3745 } while (!rc && data_length > 0);
3746 hwrm_req_drop(bp, req);
/* ethtool -m info handler.  Reads the module identifier byte and the
 * diagnostics-support byte from EEPROM page A0 and maps them to the
 * appropriate SFF standard (8472/8436/8636) and EEPROM length.
 */
3751 static int bnxt_get_module_info(struct net_device *dev,
3752 struct ethtool_modinfo *modinfo)
3754 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
3755 struct bnxt *bp = netdev_priv(dev);
3758 /* No point in going further if phy status indicates
3759 * module is not inserted or if it is powered down or
3760 * if it is of type 10GBase-T
3762 if (bp->link_info.module_status >
3763 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3766 /* This feature is not supported in older firmware versions */
3767 if (bp->hwrm_spec_code < 0x10202)
/* One read covers byte 0 (module id) through the diag-support byte */
3770 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
3771 SFF_DIAG_SUPPORT_OFFSET + 1,
3774 u8 module_id = data[0];
3775 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
3777 switch (module_id) {
3778 case SFF_MODULE_ID_SFP:
3779 modinfo->type = ETH_MODULE_SFF_8472;
3780 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
/* Without the diagnostics page only the A0 page is readable */
3781 if (!diag_supported)
3782 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3784 case SFF_MODULE_ID_QSFP:
3785 case SFF_MODULE_ID_QSFP_PLUS:
3786 modinfo->type = ETH_MODULE_SFF_8436;
3787 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3789 case SFF_MODULE_ID_QSFP28:
3790 modinfo->type = ETH_MODULE_SFF_8636;
3791 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
/* ethtool -m dump handler.  Reads the requested EEPROM window,
 * splitting it across the A0 page (first SFF-8436 length bytes) and
 * the A2 diagnostics page that follows.
 */
3801 static int bnxt_get_module_eeprom(struct net_device *dev,
3802 struct ethtool_eeprom *eeprom,
3805 struct bnxt *bp = netdev_priv(dev);
3806 u16 start = eeprom->offset, length = eeprom->len;
3809 memset(data, 0, eeprom->len);
3811 /* Read A0 portion of the EEPROM */
3812 if (start < ETH_MODULE_SFF_8436_LEN) {
/* Clamp the first read to the end of the A0 region */
3813 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
3814 length = ETH_MODULE_SFF_8436_LEN - start;
3815 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
3816 start, length, data);
/* Remaining bytes, if any, come from the A2 page */
3821 length = eeprom->len - length;
3824 /* Read A2 portion of the EEPROM */
3826 start -= ETH_MODULE_SFF_8436_LEN;
3827 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
3828 start, length, data);
/* Translate the PHY's module status into a netlink extack error for
 * the EEPROM-by-page path.  Statuses up to WARNINGMSG mean the module
 * is usable.
 */
3833 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
3835 if (bp->link_info.module_status <=
3836 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3839 switch (bp->link_info.module_status) {
3840 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
3841 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
3843 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
3844 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
3846 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
3847 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
3850 NL_SET_ERR_MSG_MOD(extack, "Unknown error");
/* Netlink per-page module EEPROM read.  Checks module presence,
 * firmware version, and bank-select capability before forwarding the
 * page/bank/offset request to the firmware.  Returns the number of
 * bytes read on success, negative errno otherwise.
 */
3856 static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
3857 const struct ethtool_module_eeprom *page_data,
3858 struct netlink_ext_ack *extack)
3860 struct bnxt *bp = netdev_priv(dev);
3863 rc = bnxt_get_module_status(bp, extack);
3867 if (bp->hwrm_spec_code < 0x10202) {
3868 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
3872 if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
3873 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
/* Firmware expects the 8-bit I2C address form (7-bit address << 1) */
3877 rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
3878 page_data->page, page_data->bank,
3883 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
3886 return page_data->length;
/* ethtool -r handler: restart autonegotiation.  Only valid when the
 * PHY is configurable and speed autoneg is enabled; a no-op (success)
 * when the interface is down.
 */
3889 static int bnxt_nway_reset(struct net_device *dev)
3893 struct bnxt *bp = netdev_priv(dev);
3894 struct bnxt_link_info *link_info = &bp->link_info;
3896 if (!BNXT_PHY_CFG_ABLE(bp))
3899 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
3902 if (netif_running(dev))
3903 rc = bnxt_hwrm_set_link_setting(bp, true, false);
/* ethtool -p handler: blink the port LEDs for identification.
 * ACTIVE starts alternating blink with a 500 ms period; INACTIVE
 * restores the default LED behavior.  PF-only, and requires the
 * firmware to have reported at least one LED.
 */
3908 static int bnxt_set_phys_id(struct net_device *dev,
3909 enum ethtool_phys_id_state state)
3911 struct hwrm_port_led_cfg_input *req;
3912 struct bnxt *bp = netdev_priv(dev);
3913 struct bnxt_pf_info *pf = &bp->pf;
3914 struct bnxt_led_cfg *led_cfg;
3919 if (!bp->num_leds || BNXT_VF(bp))
3922 if (state == ETHTOOL_ID_ACTIVE) {
3923 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
3924 duration = cpu_to_le16(500);
3925 } else if (state == ETHTOOL_ID_INACTIVE) {
3926 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
3927 duration = cpu_to_le16(0);
3931 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
3935 req->port_id = cpu_to_le16(pf->port_id);
3936 req->num_leds = bp->num_leds;
/* The request holds an array of per-LED config slots starting at led0 */
3937 led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
3938 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3939 req->enables |= BNXT_LED_DFLT_ENABLES(i);
3940 led_cfg->led_id = bp->leds[i].led_id;
3941 led_cfg->led_state = led_state;
3942 led_cfg->led_blink_on = duration;
3943 led_cfg->led_blink_off = duration;
3944 led_cfg->led_group_id = bp->leds[i].led_group_id;
3946 return hwrm_req_send(bp, req);
/* Ask the firmware to fire a test interrupt on the given completion
 * ring (HWRM_SELFTEST_IRQ).
 */
3949 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
3951 struct hwrm_selftest_irq_input *req;
3954 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
3958 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3959 return hwrm_req_send(bp, req);
/* Run the IRQ self-test on every completion ring; returns the first
 * failure, if any.
 */
3962 static int bnxt_test_irq(struct bnxt *bp)
3966 for (i = 0; i < bp->cp_nr_rings; i++) {
3967 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
3970 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
/* Enable or disable MAC-level (local) loopback via HWRM_PORT_MAC_CFG. */
3977 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
3979 struct hwrm_port_mac_cfg_input *req;
3982 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
3986 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
3988 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
3990 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
3991 return hwrm_req_send(bp, req);
/* Query the bitmap of speeds the PHY supports in forced mode
 * (HWRM_PORT_PHY_QCAPS) into *force_speeds.
 */
3994 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
3996 struct hwrm_port_phy_qcaps_output *resp;
3997 struct hwrm_port_phy_qcaps_input *req;
4000 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
4004 resp = hwrm_req_hold(bp, req);
4005 rc = hwrm_req_send(bp, req);
4007 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
4009 hwrm_req_drop(bp, req);
/* For PHY loopback on hardware that cannot loop back while autoneg is
 * on: force a fixed link speed (the current speed if the link is up,
 * otherwise the fastest advertised speed in a preference order) by
 * issuing a forced PORT_PHY_CFG with a PHY reset, then clear the
 * forced-speed field so the caller's subsequent use of 'req' is clean.
 */
4013 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
4014 struct hwrm_port_phy_cfg_input *req)
4016 struct bnxt_link_info *link_info = &bp->link_info;
/* Nothing to do if autoneg is off or the PHY loops back with AN on */
4021 if (!link_info->autoneg ||
4022 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
4025 rc = bnxt_query_force_speeds(bp, &fw_advertising);
/* Fall back to 1Gb, then prefer the highest advertised forced speed */
4029 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
4030 if (BNXT_LINK_IS_UP(bp))
4031 fw_speed = bp->link_info.link_speed;
4032 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
4033 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
4034 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
4035 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
4036 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
4037 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
4038 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
4039 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
4041 req->force_link_speed = cpu_to_le16(fw_speed);
4042 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
4043 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4044 rc = hwrm_req_send(bp, req);
/* Reset the field so the caller can reuse the held request */
4046 req->force_link_speed = cpu_to_le16(0);
/* Enable or disable PHY loopback (internal or external, per 'ext')
 * via HWRM_PORT_PHY_CFG, first disabling autoneg when required.
 */
4050 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
4052 struct hwrm_port_phy_cfg_input *req;
4055 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
4059 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
4060 hwrm_req_hold(bp, req);
4063 bnxt_disable_an_for_lpbk(bp, req);
4065 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
4067 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
4069 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
4071 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
4072 rc = hwrm_req_send(bp, req);
4073 hwrm_req_drop(bp, req);
/* Validate one looped-back RX completion: the received frame must have
 * the expected length, our own MAC address where expected, and the
 * sequential byte pattern written by bnxt_run_loopback().
 */
4077 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4078 u32 raw_cons, int pkt_size)
4080 struct bnxt_napi *bnapi = cpr->bnapi;
4081 struct bnxt_rx_ring_info *rxr;
4082 struct bnxt_sw_rx_bd *rx_buf;
4083 struct rx_cmp *rxcmp;
4089 rxr = bnapi->rx_ring;
4090 cp_cons = RING_CMP(raw_cons);
4091 rxcmp = (struct rx_cmp *)
4092 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
/* The opaque field carries the SW ring index of the RX buffer */
4093 cons = rxcmp->rx_cmp_opaque;
4094 rx_buf = &rxr->rx_buf_ring[cons];
4095 data = rx_buf->data_ptr;
4096 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
4097 if (len != pkt_size)
4100 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
/* Verify the payload byte pattern (i & 0xff per offset) */
4103 for ( ; i < pkt_size; i++) {
4104 if (data[i] != (u8)(i & 0xff))
/* Poll the completion ring (up to 200 iterations) for the loopback
 * packet's completions.  An RX L2 completion triggers validation via
 * bnxt_rx_loopback(); the consumer index is advanced past both parts
 * of the two-entry RX completion.
 */
4110 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4113 struct tx_cmp *txcmp;
4119 raw_cons = cpr->cp_raw_cons;
4120 for (i = 0; i < 200; i++) {
4121 cons = RING_CMP(raw_cons);
4122 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
4124 if (!TX_CMP_VALID(txcmp, raw_cons)) {
4129 /* The valid test of the entry must be done first before
4130 * reading any further.
4133 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
4134 TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
4135 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
/* RX L2 completions occupy two ring entries */
4136 raw_cons = NEXT_RAW_CMP(raw_cons);
4137 raw_cons = NEXT_RAW_CMP(raw_cons);
4140 raw_cons = NEXT_RAW_CMP(raw_cons);
4142 cpr->cp_raw_cons = raw_cons;
/* Transmit one self-addressed test frame on TX ring 0 and poll for it
 * on the corresponding RX completion ring.  The payload is a
 * sequential byte pattern so the receive side can verify integrity.
 * Returns 0 when the frame comes back intact.
 */
4146 static int bnxt_run_loopback(struct bnxt *bp)
4148 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
4149 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4150 struct bnxt_cp_ring_info *cpr;
4151 int pkt_size, i = 0;
4152 struct sk_buff *skb;
4157 cpr = &rxr->bnapi->cp_ring;
4158 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
/* Keep the frame small enough to be received in the copy buffer */
4160 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
4161 skb = netdev_alloc_skb(bp->dev, pkt_size);
4164 data = skb_put(skb, pkt_size);
/* Both destination and source MAC are our own address */
4165 ether_addr_copy(&data[i], bp->dev->dev_addr);
4167 ether_addr_copy(&data[i], bp->dev->dev_addr);
4169 for ( ; i < pkt_size; i++)
4170 data[i] = (u8)(i & 0xff);
4172 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
4174 if (dma_mapping_error(&bp->pdev->dev, map)) {
4178 bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
4180 /* Sync BD data before updating doorbell */
4183 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
4184 rc = bnxt_poll_loopback(bp, cpr, pkt_size);
4186 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
/* Run the firmware self-tests selected by 'test_mask'
 * (HWRM_SELFTEST_EXEC) using the firmware-reported timeout; the
 * per-test pass bits are returned in *test_results.
 */
4191 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
4193 struct hwrm_selftest_exec_output *resp;
4194 struct hwrm_selftest_exec_input *req;
4197 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
4201 hwrm_req_timeout(bp, req, bp->test_info->timeout);
4202 req->flags = test_mask;
4204 resp = hwrm_req_hold(bp, req);
4205 rc = hwrm_req_send(bp, req);
4206 *test_results = resp->test_success;
4207 hwrm_req_drop(bp, req);
/* The driver appends 4 software tests (MAC/PHY/external loopback, IRQ)
 * after the firmware-reported tests; these index macros expect a local
 * 'bp' in scope at the use site.
 */
4211 #define BNXT_DRV_TESTS 4
4212 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
4213 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
4214 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
4215 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
/* ethtool self-test entry point.  Online firmware tests run with the
 * NIC up; offline tests (not allowed with active VFs or on a shared
 * PF) close the NIC, run the firmware offline tests plus the driver's
 * MAC/PHY/external loopback tests in half-open mode, then reopen and
 * run the IRQ test.  Per-test results land in buf[], failures set
 * ETH_TEST_FL_FAILED.
 */
4217 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
4220 struct bnxt *bp = netdev_priv(dev);
4221 bool do_ext_lpbk = false;
4222 bool offline = false;
4223 u8 test_results = 0;
4227 if (!bp->num_tests || !BNXT_PF(bp))
4229 memset(buf, 0, sizeof(u64) * bp->num_tests);
4230 if (!netif_running(dev)) {
4231 etest->flags |= ETH_TEST_FL_FAILED;
/* External loopback requires explicit request plus PHY capability */
4235 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
4236 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
4239 if (etest->flags & ETH_TEST_FL_OFFLINE) {
4240 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
4241 etest->flags |= ETH_TEST_FL_FAILED;
4242 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
/* Build the mask of firmware tests appropriate for the chosen mode */
4248 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4249 u8 bit_val = 1 << i;
4251 if (!(bp->test_info->offline_mask & bit_val))
4252 test_mask |= bit_val;
4254 test_mask |= bit_val;
4257 bnxt_run_fw_tests(bp, test_mask, &test_results);
4260 bnxt_close_nic(bp, true, false);
4261 bnxt_run_fw_tests(bp, test_mask, &test_results);
/* Mark MAC loopback failed up front; cleared on success below */
4263 buf[BNXT_MACLPBK_TEST_IDX] = 1;
4264 bnxt_hwrm_mac_loopback(bp, true);
4266 rc = bnxt_half_open_nic(bp);
4268 bnxt_hwrm_mac_loopback(bp, false);
4269 etest->flags |= ETH_TEST_FL_FAILED;
4270 bnxt_ulp_start(bp, rc);
4273 if (bnxt_run_loopback(bp))
4274 etest->flags |= ETH_TEST_FL_FAILED;
4276 buf[BNXT_MACLPBK_TEST_IDX] = 0;
4278 bnxt_hwrm_mac_loopback(bp, false);
4279 bnxt_hwrm_phy_loopback(bp, true, false);
4281 if (bnxt_run_loopback(bp)) {
4282 buf[BNXT_PHYLPBK_TEST_IDX] = 1;
4283 etest->flags |= ETH_TEST_FL_FAILED;
4286 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
4287 bnxt_hwrm_phy_loopback(bp, true, true);
4289 if (bnxt_run_loopback(bp)) {
4290 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
4291 etest->flags |= ETH_TEST_FL_FAILED;
4294 bnxt_hwrm_phy_loopback(bp, false, false);
4295 bnxt_half_close_nic(bp);
4296 rc = bnxt_open_nic(bp, true, true);
4297 bnxt_ulp_start(bp, rc);
4299 if (rc || bnxt_test_irq(bp)) {
4300 buf[BNXT_IRQ_TEST_IDX] = 1;
4301 etest->flags |= ETH_TEST_FL_FAILED;
/* Record any firmware test that was requested but did not pass */
4303 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4304 u8 bit_val = 1 << i;
4306 if ((test_mask & bit_val) && !(test_results & bit_val)) {
4308 etest->flags |= ETH_TEST_FL_FAILED;
4313 static int bnxt_reset(struct net_device *dev, u32 *flags)
4315 struct bnxt *bp = netdev_priv(dev);
4316 bool reload = false;
4323 netdev_err(dev, "Reset is not supported from a VF\n");
4327 if (pci_vfs_assigned(bp->pdev) &&
4328 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
4330 "Reset not allowed when VFs are assigned to VMs\n");
4334 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
4335 /* This feature is not supported in older firmware versions */
4336 if (bp->hwrm_spec_code >= 0x10803) {
4337 if (!bnxt_firmware_reset_chip(dev)) {
4338 netdev_info(dev, "Firmware reset request successful.\n");
4339 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
4341 *flags &= ~BNXT_FW_RESET_CHIP;
4343 } else if (req == BNXT_FW_RESET_CHIP) {
4344 return -EOPNOTSUPP; /* only request, fail hard */
4348 if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
4349 /* This feature is not supported in older firmware versions */
4350 if (bp->hwrm_spec_code >= 0x10803) {
4351 if (!bnxt_firmware_reset_ap(dev)) {
4352 netdev_info(dev, "Reset application processor successful.\n");
4354 *flags &= ~BNXT_FW_RESET_AP;
4356 } else if (req == BNXT_FW_RESET_AP) {
4357 return -EOPNOTSUPP; /* only request, fail hard */
4362 netdev_info(dev, "Reload driver to complete reset\n");
/* ethtool -W handler: select the coredump type (live or crash).
 * Crash dumps require the TEE_BNXT_FW config option.
 */
4367 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
4369 struct bnxt *bp = netdev_priv(dev);
4371 if (dump->flag > BNXT_DUMP_CRASH) {
4372 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
4376 if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
4377 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
4381 bp->dump_flag = dump->flag;
/* Report the selected dump type, its expected length, and a version
 * derived from the firmware version fields.  Requires HWRM spec
 * >= 0x10801.
 */
4385 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
4387 struct bnxt *bp = netdev_priv(dev);
4389 if (bp->hwrm_spec_code < 0x10801)
/* Pack maj.min.bld.rsvd firmware version into one 32-bit value */
4392 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
4393 bp->ver_resp.hwrm_fw_min_8b << 16 |
4394 bp->ver_resp.hwrm_fw_bld_8b << 8 |
4395 bp->ver_resp.hwrm_fw_rsvd_8b;
4397 dump->flag = bp->dump_flag;
4398 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
/* Collect the coredump of the previously selected type into 'buf'.
 * Requires HWRM spec >= 0x10801.
 */
4402 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
4405 struct bnxt *bp = netdev_priv(dev);
4407 if (bp->hwrm_spec_code < 0x10801)
4410 memset(buf, 0, dump->len);
4412 dump->flag = bp->dump_flag;
4413 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
/* ethtool -T handler: report timestamping capabilities.  Software
 * timestamping is always available; hardware timestamping and the PHC
 * index are added when a PTP clock is registered.
 */
4416 static int bnxt_get_ts_info(struct net_device *dev,
4417 struct ethtool_ts_info *info)
4419 struct bnxt *bp = netdev_priv(dev);
4420 struct bnxt_ptp_cfg *ptp;
4423 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4424 SOF_TIMESTAMPING_RX_SOFTWARE |
4425 SOF_TIMESTAMPING_SOFTWARE;
4427 info->phc_index = -1;
4431 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
4432 SOF_TIMESTAMPING_RX_HARDWARE |
4433 SOF_TIMESTAMPING_RAW_HARDWARE;
4435 info->phc_index = ptp_clock_index(ptp->ptp_clock);
4437 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
4439 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
4440 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
4441 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* All-packet RX timestamping only when the firmware supports it */
4443 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
4444 info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
/* One-time ethtool setup: fetch the NVM package version (when the
 * firmware did not already report it) and, on PFs with HWRM spec
 * >= 0x10704, query the firmware self-test list (HWRM_SELFTEST_QLIST)
 * and build the test-name strings, appending the driver's own tests.
 */
4448 void bnxt_ethtool_init(struct bnxt *bp)
4450 struct hwrm_selftest_qlist_output *resp;
4451 struct hwrm_selftest_qlist_input *req;
4452 struct bnxt_test_info *test_info;
4453 struct net_device *dev = bp->dev;
4456 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
4457 bnxt_get_pkgver(dev);
4460 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
/* Allocate the test_info cache once; reused across re-init */
4463 test_info = bp->test_info;
4465 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
4468 bp->test_info = test_info;
4471 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
4474 resp = hwrm_req_hold(bp, req);
4475 rc = hwrm_req_send_silent(bp, req);
4477 goto ethtool_init_exit;
4479 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
4480 if (bp->num_tests > BNXT_MAX_TEST)
4481 bp->num_tests = BNXT_MAX_TEST;
4483 test_info->offline_mask = resp->offline_tests;
4484 test_info->timeout = le16_to_cpu(resp->test_timeout);
4485 if (!test_info->timeout)
4486 test_info->timeout = HWRM_CMD_TIMEOUT;
4487 for (i = 0; i < bp->num_tests; i++) {
4488 char *str = test_info->string[i];
4489 char *fw_str = resp->test_name[i];
/* Driver tests get fixed names; firmware tests use reported names */
4491 if (i == BNXT_MACLPBK_TEST_IDX) {
4492 strcpy(str, "Mac loopback test (offline)");
4493 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
4494 strcpy(str, "Phy loopback test (offline)");
4495 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
4496 strcpy(str, "Ext loopback test (offline)");
4497 } else if (i == BNXT_IRQ_TEST_IDX) {
4498 strcpy(str, "Interrupt_test (offline)");
4500 snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
4501 fw_str, test_info->offline_mask & (1 << i) ?
4502 "offline" : "online");
4507 hwrm_req_drop(bp, req);
/* Standard ethtool PHY stats: report PCS symbol errors from the
 * extended port statistics (PF with extended stats only).
 */
4510 static void bnxt_get_eth_phy_stats(struct net_device *dev,
4511 struct ethtool_eth_phy_stats *phy_stats)
4513 struct bnxt *bp = netdev_priv(dev);
4516 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4519 rx = bp->rx_port_stats_ext.sw_stats;
4520 phy_stats->SymbolErrorDuringCarrier =
4521 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
/* Standard ethtool MAC stats from the cached port statistics
 * (PF with port stats only).
 */
4524 static void bnxt_get_eth_mac_stats(struct net_device *dev,
4525 struct ethtool_eth_mac_stats *mac_stats)
4527 struct bnxt *bp = netdev_priv(dev);
4530 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
/* TX counters live at a fixed byte offset after the RX counters */
4533 rx = bp->port_stats.sw_stats;
4534 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4536 mac_stats->FramesReceivedOK =
4537 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
4538 mac_stats->FramesTransmittedOK =
4539 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
4540 mac_stats->FrameCheckSequenceErrors =
4541 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
4542 mac_stats->AlignmentErrors =
4543 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
4544 mac_stats->OutOfRangeLengthField =
4545 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
/* Standard ethtool control-frame stats (PF with port stats only). */
4548 static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
4549 struct ethtool_eth_ctrl_stats *ctrl_stats)
4551 struct bnxt *bp = netdev_priv(dev);
4554 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4557 rx = bp->port_stats.sw_stats;
4558 ctrl_stats->MACControlFramesReceived =
4559 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
/* Frame-size buckets matching the hist[]/hist_tx[] entries filled in
 * bnxt_get_rmon_stats() (array body elided in this view).
 */
4562 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
/* Standard ethtool RMON stats: jabber/oversize/undersize counters and
 * the RX/TX frame-size histograms, with bucket boundaries described by
 * bnxt_rmon_ranges (PF with port stats only).
 */
4576 static void bnxt_get_rmon_stats(struct net_device *dev,
4577 struct ethtool_rmon_stats *rmon_stats,
4578 const struct ethtool_rmon_hist_range **ranges)
4580 struct bnxt *bp = netdev_priv(dev);
4583 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
/* TX counters live at a fixed byte offset after the RX counters */
4586 rx = bp->port_stats.sw_stats;
4587 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4589 rmon_stats->jabbers =
4590 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
4591 rmon_stats->oversize_pkts =
4592 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
4593 rmon_stats->undersize_pkts =
4594 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
4596 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
4597 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
4598 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
4599 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
4600 rmon_stats->hist[4] =
4601 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
4602 rmon_stats->hist[5] =
4603 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
4604 rmon_stats->hist[6] =
4605 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
4606 rmon_stats->hist[7] =
4607 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
4608 rmon_stats->hist[8] =
4609 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
4610 rmon_stats->hist[9] =
4611 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
4613 rmon_stats->hist_tx[0] =
4614 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
4615 rmon_stats->hist_tx[1] =
4616 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
4617 rmon_stats->hist_tx[2] =
4618 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
4619 rmon_stats->hist_tx[3] =
4620 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
4621 rmon_stats->hist_tx[4] =
4622 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
4623 rmon_stats->hist_tx[5] =
4624 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
4625 rmon_stats->hist_tx[6] =
4626 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
4627 rmon_stats->hist_tx[7] =
4628 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
4629 rmon_stats->hist_tx[8] =
4630 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
4631 rmon_stats->hist_tx[9] =
4632 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
4634 *ranges = bnxt_rmon_ranges;
/* Extended link stats: report the link-down event count from the
 * extended port statistics (PF with extended stats only).
 */
4637 static void bnxt_get_link_ext_stats(struct net_device *dev,
4638 struct ethtool_link_ext_stats *stats)
4640 struct bnxt *bp = netdev_priv(dev);
4643 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4646 rx = bp->rx_port_stats_ext.sw_stats;
4647 stats->link_down_events =
4648 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
/* Free the self-test info allocated by bnxt_ethtool_init(); NULLing
 * the pointer makes repeated calls safe.
 */
4651 void bnxt_ethtool_free(struct bnxt *bp)
4653 kfree(bp->test_info);
4654 bp->test_info = NULL;
4657 const struct ethtool_ops bnxt_ethtool_ops = {
4658 .cap_link_lanes_supported = 1,
4659 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4660 ETHTOOL_COALESCE_MAX_FRAMES |
4661 ETHTOOL_COALESCE_USECS_IRQ |
4662 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
4663 ETHTOOL_COALESCE_STATS_BLOCK_USECS |
4664 ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
4665 ETHTOOL_COALESCE_USE_CQE,
4666 .get_link_ksettings = bnxt_get_link_ksettings,
4667 .set_link_ksettings = bnxt_set_link_ksettings,
4668 .get_fec_stats = bnxt_get_fec_stats,
4669 .get_fecparam = bnxt_get_fecparam,
4670 .set_fecparam = bnxt_set_fecparam,
4671 .get_pause_stats = bnxt_get_pause_stats,
4672 .get_pauseparam = bnxt_get_pauseparam,
4673 .set_pauseparam = bnxt_set_pauseparam,
4674 .get_drvinfo = bnxt_get_drvinfo,
4675 .get_regs_len = bnxt_get_regs_len,
4676 .get_regs = bnxt_get_regs,
4677 .get_wol = bnxt_get_wol,
4678 .set_wol = bnxt_set_wol,
4679 .get_coalesce = bnxt_get_coalesce,
4680 .set_coalesce = bnxt_set_coalesce,
4681 .get_msglevel = bnxt_get_msglevel,
4682 .set_msglevel = bnxt_set_msglevel,
4683 .get_sset_count = bnxt_get_sset_count,
4684 .get_strings = bnxt_get_strings,
4685 .get_ethtool_stats = bnxt_get_ethtool_stats,
4686 .set_ringparam = bnxt_set_ringparam,
4687 .get_ringparam = bnxt_get_ringparam,
4688 .get_channels = bnxt_get_channels,
4689 .set_channels = bnxt_set_channels,
4690 .get_rxnfc = bnxt_get_rxnfc,
4691 .set_rxnfc = bnxt_set_rxnfc,
4692 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
4693 .get_rxfh_key_size = bnxt_get_rxfh_key_size,
4694 .get_rxfh = bnxt_get_rxfh,
4695 .set_rxfh = bnxt_set_rxfh,
4696 .flash_device = bnxt_flash_device,
4697 .get_eeprom_len = bnxt_get_eeprom_len,
4698 .get_eeprom = bnxt_get_eeprom,
4699 .set_eeprom = bnxt_set_eeprom,
4700 .get_link = bnxt_get_link,
4701 .get_link_ext_stats = bnxt_get_link_ext_stats,
4702 .get_eee = bnxt_get_eee,
4703 .set_eee = bnxt_set_eee,
4704 .get_module_info = bnxt_get_module_info,
4705 .get_module_eeprom = bnxt_get_module_eeprom,
4706 .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
4707 .nway_reset = bnxt_nway_reset,
4708 .set_phys_id = bnxt_set_phys_id,
4709 .self_test = bnxt_self_test,
4710 .get_ts_info = bnxt_get_ts_info,
4711 .reset = bnxt_reset,
4712 .set_dump = bnxt_set_dump,
4713 .get_dump_flag = bnxt_get_dump_flag,
4714 .get_dump_data = bnxt_get_dump_data,
4715 .get_eth_phy_stats = bnxt_get_eth_phy_stats,
4716 .get_eth_mac_stats = bnxt_get_eth_mac_stats,
4717 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
4718 .get_rmon_stats = bnxt_get_rmon_stats,