1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2017 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/bitops.h>
12 #include <linux/ctype.h>
13 #include <linux/stringify.h>
14 #include <linux/ethtool.h>
15 #include <linux/ethtool_netlink.h>
16 #include <linux/linkmode.h>
17 #include <linux/interrupt.h>
18 #include <linux/pci.h>
19 #include <linux/etherdevice.h>
20 #include <linux/crc32.h>
21 #include <linux/firmware.h>
22 #include <linux/utsname.h>
23 #include <linux/time.h>
24 #include <linux/ptp_clock_kernel.h>
25 #include <linux/net_tstamp.h>
26 #include <linux/timecounter.h>
27 #include <net/netlink.h>
30 #include "bnxt_hwrm.h"
34 #include "bnxt_ethtool.h"
35 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
36 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
37 #include "bnxt_coredump.h"
39 #define BNXT_NVM_ERR_MSG(dev, extack, msg) \
42 NL_SET_ERR_MSG_MOD(extack, msg); \
43 netdev_err(dev, "%s\n", msg); \
46 static u32 bnxt_get_msglevel(struct net_device *dev)
48 struct bnxt *bp = netdev_priv(dev);
50 return bp->msg_enable;
53 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
55 struct bnxt *bp = netdev_priv(dev);
57 bp->msg_enable = value;
60 static int bnxt_get_coalesce(struct net_device *dev,
61 struct ethtool_coalesce *coal,
62 struct kernel_ethtool_coalesce *kernel_coal,
63 struct netlink_ext_ack *extack)
65 struct bnxt *bp = netdev_priv(dev);
66 struct bnxt_coal *hw_coal;
69 memset(coal, 0, sizeof(*coal));
71 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
73 hw_coal = &bp->rx_coal;
74 mult = hw_coal->bufs_per_record;
75 coal->rx_coalesce_usecs = hw_coal->coal_ticks;
76 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
77 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
78 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
80 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
81 kernel_coal->use_cqe_mode_rx = true;
83 hw_coal = &bp->tx_coal;
84 mult = hw_coal->bufs_per_record;
85 coal->tx_coalesce_usecs = hw_coal->coal_ticks;
86 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
87 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
88 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
90 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
91 kernel_coal->use_cqe_mode_tx = true;
93 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
98 static int bnxt_set_coalesce(struct net_device *dev,
99 struct ethtool_coalesce *coal,
100 struct kernel_ethtool_coalesce *kernel_coal,
101 struct netlink_ext_ack *extack)
103 struct bnxt *bp = netdev_priv(dev);
104 bool update_stats = false;
105 struct bnxt_coal *hw_coal;
109 if (coal->use_adaptive_rx_coalesce) {
110 bp->flags |= BNXT_FLAG_DIM;
112 if (bp->flags & BNXT_FLAG_DIM) {
113 bp->flags &= ~(BNXT_FLAG_DIM);
118 if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
119 !(bp->coal_cap.cmpl_params &
120 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
123 hw_coal = &bp->rx_coal;
124 mult = hw_coal->bufs_per_record;
125 hw_coal->coal_ticks = coal->rx_coalesce_usecs;
126 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
127 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
128 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
130 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
131 if (kernel_coal->use_cqe_mode_rx)
133 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
135 hw_coal = &bp->tx_coal;
136 mult = hw_coal->bufs_per_record;
137 hw_coal->coal_ticks = coal->tx_coalesce_usecs;
138 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
139 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
140 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
142 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
143 if (kernel_coal->use_cqe_mode_tx)
145 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
147 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
148 u32 stats_ticks = coal->stats_block_coalesce_usecs;
150 /* Allow 0, which means disable. */
152 stats_ticks = clamp_t(u32, stats_ticks,
153 BNXT_MIN_STATS_COAL_TICKS,
154 BNXT_MAX_STATS_COAL_TICKS);
155 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
156 bp->stats_coal_ticks = stats_ticks;
157 if (bp->stats_coal_ticks)
158 bp->current_interval =
159 bp->stats_coal_ticks * HZ / 1000000;
161 bp->current_interval = BNXT_TIMER_INTERVAL;
166 if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
168 bnxt_close_nic(bp, true, false);
169 rc = bnxt_open_nic(bp, true, false);
171 rc = bnxt_hwrm_set_coal(bp);
178 static const char * const bnxt_ring_rx_stats_str[] = {
189 static const char * const bnxt_ring_tx_stats_str[] = {
200 static const char * const bnxt_ring_tpa_stats_str[] = {
207 static const char * const bnxt_ring_tpa2_stats_str[] = {
208 "rx_tpa_eligible_pkt",
209 "rx_tpa_eligible_bytes",
216 static const char * const bnxt_rx_sw_stats_str[] = {
222 static const char * const bnxt_cmn_sw_stats_str[] = {
226 #define BNXT_RX_STATS_ENTRY(counter) \
227 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
229 #define BNXT_TX_STATS_ENTRY(counter) \
230 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
232 #define BNXT_RX_STATS_EXT_ENTRY(counter) \
233 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
235 #define BNXT_TX_STATS_EXT_ENTRY(counter) \
236 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
238 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
239 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
240 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
242 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
243 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
244 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
246 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \
247 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
248 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
249 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
250 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
251 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
252 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
253 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
254 BNXT_RX_STATS_EXT_PFC_ENTRY(7)
256 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \
257 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
258 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
259 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
260 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
261 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
262 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
263 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
264 BNXT_TX_STATS_EXT_PFC_ENTRY(7)
266 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
267 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
268 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
270 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
271 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
272 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
274 #define BNXT_RX_STATS_EXT_COS_ENTRIES \
275 BNXT_RX_STATS_EXT_COS_ENTRY(0), \
276 BNXT_RX_STATS_EXT_COS_ENTRY(1), \
277 BNXT_RX_STATS_EXT_COS_ENTRY(2), \
278 BNXT_RX_STATS_EXT_COS_ENTRY(3), \
279 BNXT_RX_STATS_EXT_COS_ENTRY(4), \
280 BNXT_RX_STATS_EXT_COS_ENTRY(5), \
281 BNXT_RX_STATS_EXT_COS_ENTRY(6), \
282 BNXT_RX_STATS_EXT_COS_ENTRY(7) \
284 #define BNXT_TX_STATS_EXT_COS_ENTRIES \
285 BNXT_TX_STATS_EXT_COS_ENTRY(0), \
286 BNXT_TX_STATS_EXT_COS_ENTRY(1), \
287 BNXT_TX_STATS_EXT_COS_ENTRY(2), \
288 BNXT_TX_STATS_EXT_COS_ENTRY(3), \
289 BNXT_TX_STATS_EXT_COS_ENTRY(4), \
290 BNXT_TX_STATS_EXT_COS_ENTRY(5), \
291 BNXT_TX_STATS_EXT_COS_ENTRY(6), \
292 BNXT_TX_STATS_EXT_COS_ENTRY(7) \
294 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
295 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
296 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
298 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
299 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
300 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
301 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
302 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
303 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
304 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
305 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
306 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
308 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
309 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
310 __stringify(counter##_pri##n) }
312 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
313 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
314 __stringify(counter##_pri##n) }
316 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \
317 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
318 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
319 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
320 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
321 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
322 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
323 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
324 BNXT_RX_STATS_PRI_ENTRY(counter, 7)
326 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \
327 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
328 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
329 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
330 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
331 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
332 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
333 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
334 BNXT_TX_STATS_PRI_ENTRY(counter, 7)
342 static const char *const bnxt_ring_err_stats_arr[] = {
343 "rx_total_l4_csum_errors",
345 "rx_total_buf_errors",
346 "rx_total_oom_discards",
347 "rx_total_netpoll_discards",
348 "rx_total_ring_discards",
350 "tx_total_ring_discards",
354 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
355 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
356 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
357 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
359 static const struct {
361 char string[ETH_GSTRING_LEN];
362 } bnxt_port_stats_arr[] = {
363 BNXT_RX_STATS_ENTRY(rx_64b_frames),
364 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
365 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
366 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
367 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
368 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
369 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
370 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
371 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
372 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
373 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
374 BNXT_RX_STATS_ENTRY(rx_total_frames),
375 BNXT_RX_STATS_ENTRY(rx_ucast_frames),
376 BNXT_RX_STATS_ENTRY(rx_mcast_frames),
377 BNXT_RX_STATS_ENTRY(rx_bcast_frames),
378 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
379 BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
380 BNXT_RX_STATS_ENTRY(rx_pause_frames),
381 BNXT_RX_STATS_ENTRY(rx_pfc_frames),
382 BNXT_RX_STATS_ENTRY(rx_align_err_frames),
383 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
384 BNXT_RX_STATS_ENTRY(rx_jbr_frames),
385 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
386 BNXT_RX_STATS_ENTRY(rx_tagged_frames),
387 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
388 BNXT_RX_STATS_ENTRY(rx_good_frames),
389 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
390 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
391 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
392 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
393 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
394 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
395 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
396 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
397 BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
398 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
399 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
400 BNXT_RX_STATS_ENTRY(rx_bytes),
401 BNXT_RX_STATS_ENTRY(rx_runt_bytes),
402 BNXT_RX_STATS_ENTRY(rx_runt_frames),
403 BNXT_RX_STATS_ENTRY(rx_stat_discard),
404 BNXT_RX_STATS_ENTRY(rx_stat_err),
406 BNXT_TX_STATS_ENTRY(tx_64b_frames),
407 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
408 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
409 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
410 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
411 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
412 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
413 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
414 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
415 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
416 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
417 BNXT_TX_STATS_ENTRY(tx_good_frames),
418 BNXT_TX_STATS_ENTRY(tx_total_frames),
419 BNXT_TX_STATS_ENTRY(tx_ucast_frames),
420 BNXT_TX_STATS_ENTRY(tx_mcast_frames),
421 BNXT_TX_STATS_ENTRY(tx_bcast_frames),
422 BNXT_TX_STATS_ENTRY(tx_pause_frames),
423 BNXT_TX_STATS_ENTRY(tx_pfc_frames),
424 BNXT_TX_STATS_ENTRY(tx_jabber_frames),
425 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
426 BNXT_TX_STATS_ENTRY(tx_err),
427 BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
428 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
429 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
430 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
431 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
432 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
433 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
434 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
435 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
436 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
437 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
438 BNXT_TX_STATS_ENTRY(tx_total_collisions),
439 BNXT_TX_STATS_ENTRY(tx_bytes),
440 BNXT_TX_STATS_ENTRY(tx_xthol_frames),
441 BNXT_TX_STATS_ENTRY(tx_stat_discard),
442 BNXT_TX_STATS_ENTRY(tx_stat_error),
445 static const struct {
447 char string[ETH_GSTRING_LEN];
448 } bnxt_port_stats_ext_arr[] = {
449 BNXT_RX_STATS_EXT_ENTRY(link_down_events),
450 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
451 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
452 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
453 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
454 BNXT_RX_STATS_EXT_COS_ENTRIES,
455 BNXT_RX_STATS_EXT_PFC_ENTRIES,
456 BNXT_RX_STATS_EXT_ENTRY(rx_bits),
457 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
458 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
459 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
460 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
461 BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
462 BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
463 BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
466 static const struct {
468 char string[ETH_GSTRING_LEN];
469 } bnxt_tx_port_stats_ext_arr[] = {
470 BNXT_TX_STATS_EXT_COS_ENTRIES,
471 BNXT_TX_STATS_EXT_PFC_ENTRIES,
474 static const struct {
476 char string[ETH_GSTRING_LEN];
477 } bnxt_rx_bytes_pri_arr[] = {
478 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
481 static const struct {
483 char string[ETH_GSTRING_LEN];
484 } bnxt_rx_pkts_pri_arr[] = {
485 BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
488 static const struct {
490 char string[ETH_GSTRING_LEN];
491 } bnxt_tx_bytes_pri_arr[] = {
492 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
495 static const struct {
497 char string[ETH_GSTRING_LEN];
498 } bnxt_tx_pkts_pri_arr[] = {
499 BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
502 #define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr)
503 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
504 #define BNXT_NUM_STATS_PRI \
505 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
506 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
507 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
508 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
510 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
512 if (BNXT_SUPPORTS_TPA(bp)) {
513 if (bp->max_tpa_v2) {
514 if (BNXT_CHIP_P5(bp))
515 return BNXT_NUM_TPA_RING_STATS_P5;
516 return BNXT_NUM_TPA_RING_STATS_P7;
518 return BNXT_NUM_TPA_RING_STATS;
523 static int bnxt_get_num_ring_stats(struct bnxt *bp)
527 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
528 bnxt_get_num_tpa_ring_stats(bp);
529 tx = NUM_RING_TX_HW_STATS;
530 cmn = NUM_RING_CMN_SW_STATS;
531 return rx * bp->rx_nr_rings +
532 tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
533 cmn * bp->cp_nr_rings;
536 static int bnxt_get_num_stats(struct bnxt *bp)
538 int num_stats = bnxt_get_num_ring_stats(bp);
541 num_stats += BNXT_NUM_RING_ERR_STATS;
543 if (bp->flags & BNXT_FLAG_PORT_STATS)
544 num_stats += BNXT_NUM_PORT_STATS;
546 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
547 len = min_t(int, bp->fw_rx_stats_ext_size,
548 ARRAY_SIZE(bnxt_port_stats_ext_arr));
550 len = min_t(int, bp->fw_tx_stats_ext_size,
551 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
553 if (bp->pri2cos_valid)
554 num_stats += BNXT_NUM_STATS_PRI;
560 static int bnxt_get_sset_count(struct net_device *dev, int sset)
562 struct bnxt *bp = netdev_priv(dev);
566 return bnxt_get_num_stats(bp);
570 return bp->num_tests;
576 static bool is_rx_ring(struct bnxt *bp, int ring_num)
578 return ring_num < bp->rx_nr_rings;
581 static bool is_tx_ring(struct bnxt *bp, int ring_num)
585 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
586 tx_base = bp->rx_nr_rings;
588 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
/* ethtool -> get_ethtool_stats: fill @buf with all counters, in exactly
 * the order bnxt_get_strings() emits the names.
 *
 * Layout per completion ring: RX HW stats (RX rings only), TX HW stats
 * (TX rings only), TPA stats (RX rings only, if supported), RX SW stats,
 * then common SW stats.  After the rings: ring error totals, optional
 * port stats, extended port stats, and per-priority stats.
 *
 * NOTE(review): several lines of this function are missing from this
 * view (declarations of i/j/k/sw/curr/prev/len, loop closers, the
 * skip_ring_stats/skip_tpa_ring_stats labels); code left untouched.
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
		/* Rings not allocated: leave the ring area and jump ahead. */
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		/* HW-maintained RX counters, only for RX-carrying rings. */
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		/* HW-maintained TX counters follow RX in sw_stats[]. */
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
				buf[j] = sw_stats[k];
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;
		/* TPA counters follow the RX+TX HW counters. */
		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			buf[j] = sw_stats[k];
		/* Driver-maintained (software) RX counters. */
		sw = (u64 *)&cpr->sw_stats->rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
		/* Driver-maintained counters common to all ring types. */
		sw = (u64 *)&cpr->sw_stats->cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
	bnxt_get_ring_err_stats(bp, &ring_err_stats);
	/* Walk the error-stats structs field by field; current totals are
	 * added to the totals saved across the last ring reconfiguration.
	 */
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;
		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		/* Only export counters the firmware actually provides. */
		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		if (bp->pri2cos_valid) {
			/* Per-priority counters: remap priority -> CoS queue
			 * via the base offset recorded in the *_pri arrays.
			 */
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
				buf[j] = *(rx_port_stats_ext + n);
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
				buf[j] = *(rx_port_stats_ext + n);
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
				buf[j] = *(tx_port_stats_ext + n);
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
				buf[j] = *(tx_port_stats_ext + n);
/* ethtool -> get_strings: emit counter names for ETH_SS_STATS (mirroring
 * the order of bnxt_get_ethtool_stats()) or self-test names for
 * ETH_SS_TEST.  Each name occupies exactly ETH_GSTRING_LEN bytes.
 *
 * NOTE(review): the switch/case scaffolding, loop closers and some
 * sprintf targets are missing from this view; code left untouched.
 */
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		/* Per-ring names are prefixed with the ring index. */
		if (is_rx_ring(bp, i)) {
			num_str = NUM_RING_RX_HW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_ring_rx_stats_str[j]);
				buf += ETH_GSTRING_LEN;
		if (is_tx_ring(bp, i)) {
			num_str = NUM_RING_TX_HW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_ring_tx_stats_str[j]);
				buf += ETH_GSTRING_LEN;
		num_str = bnxt_get_num_tpa_ring_stats(bp);
		if (!num_str || !is_rx_ring(bp, i))
		/* TPA v2 chips use a different (larger) name table. */
			str = bnxt_ring_tpa2_stats_str;
			str = bnxt_ring_tpa_stats_str;
		for (j = 0; j < num_str; j++) {
			sprintf(buf, "[%d]: %s", i, str[j]);
			buf += ETH_GSTRING_LEN;
		if (is_rx_ring(bp, i)) {
			num_str = NUM_RING_RX_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_rx_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
		num_str = NUM_RING_CMN_SW_STATS;
		for (j = 0; j < num_str; j++) {
			sprintf(buf, "[%d]: %s", i,
				bnxt_cmn_sw_stats_str[j]);
			buf += ETH_GSTRING_LEN;
	/* Ring error totals follow the per-ring names. */
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
		strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
		buf += ETH_GSTRING_LEN;
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
			strcpy(buf, bnxt_port_stats_arr[i].string);
			buf += ETH_GSTRING_LEN;
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		/* Same firmware-size clamping as bnxt_get_num_stats(). */
		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++) {
			strcpy(buf, bnxt_port_stats_ext_arr[i].string);
			buf += ETH_GSTRING_LEN;
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++) {
				bnxt_tx_port_stats_ext_arr[i].string);
			buf += ETH_GSTRING_LEN;
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++) {
					bnxt_rx_bytes_pri_arr[i].string);
				buf += ETH_GSTRING_LEN;
			for (i = 0; i < 8; i++) {
					bnxt_rx_pkts_pri_arr[i].string);
				buf += ETH_GSTRING_LEN;
			for (i = 0; i < 8; i++) {
					bnxt_tx_bytes_pri_arr[i].string);
				buf += ETH_GSTRING_LEN;
			for (i = 0; i < 8; i++) {
					bnxt_tx_pkts_pri_arr[i].string);
				buf += ETH_GSTRING_LEN;
	/* ETH_SS_TEST: self-test names come preformatted from firmware. */
		memcpy(buf, bp->test_info->string,
		       bp->num_tests * ETH_GSTRING_LEN);
	netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
822 static void bnxt_get_ringparam(struct net_device *dev,
823 struct ethtool_ringparam *ering,
824 struct kernel_ethtool_ringparam *kernel_ering,
825 struct netlink_ext_ack *extack)
827 struct bnxt *bp = netdev_priv(dev);
829 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
830 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
831 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
832 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
834 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
835 ering->rx_jumbo_max_pending = 0;
836 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
838 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
840 ering->rx_pending = bp->rx_ring_size;
841 ering->rx_jumbo_pending = bp->rx_agg_ring_size;
842 ering->tx_pending = bp->tx_ring_size;
845 static int bnxt_set_ringparam(struct net_device *dev,
846 struct ethtool_ringparam *ering,
847 struct kernel_ethtool_ringparam *kernel_ering,
848 struct netlink_ext_ack *extack)
850 struct bnxt *bp = netdev_priv(dev);
852 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
853 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
854 (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
857 if (netif_running(dev))
858 bnxt_close_nic(bp, false, false);
860 bp->rx_ring_size = ering->rx_pending;
861 bp->tx_ring_size = ering->tx_pending;
862 bnxt_set_ring_params(bp);
864 if (netif_running(dev))
865 return bnxt_open_nic(bp, false, false);
/* ethtool -> get_channels: report current and maximum channel counts.
 *
 * Max counts are derived from hardware resources, clamped by the TX
 * scheduler input limit, and divided across traffic classes (and an
 * extra group for XDP).  Combined maxima are computed with shared
 * rings; dedicated rx/tx maxima without.
 *
 * NOTE(review): the tcs initialization, several branch headers and the
 * zeroing of unavailable maxima are missing from this view; code left
 * untouched.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	/* Maxima assuming shared (combined) rings. */
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	/* TX rings are split across TC groups (plus one for XDP). */
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Maxima for dedicated (non-shared) rx/tx rings. */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		/* Nitro A0 reserves one ring internally. */
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
/* ethtool -> set_channels: reconfigure the number of RX/TX channels.
 *
 * Validates the request (combined XOR dedicated counts, XDP requires
 * combined mode, RSS table constraints), checks ring availability,
 * closes the NIC if running, applies the new counts, then reopens.
 *
 * NOTE(review): error-return statements, the "sh"/"tcs"/"tx_xdp"/"rc"
 * declarations and several branch closers are missing from this view;
 * code left untouched.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	/* No "other" channels are supported. */
	if (channel->other_count)
	/* Must request either combined or both rx and tx counts. */
	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
	/* Combined and dedicated counts are mutually exclusive. */
	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
	if (channel->combined_count)
	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
		/* XDP needs one TX ring per RX ring. */
		tx_xdp = req_rx_rings;
	/* A change in RSS indirection table size requires default entries. */
	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
		netdev_warn(dev, "Unable to allocate the requested rings\n");
	if (netif_running(dev)) {
			/* TODO CHIMP_FW: Send message to all VF's
		bnxt_close_nic(bp, true, false);
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
	/* Completion rings: shared rings overlap RX and TX CPs. */
	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;
	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
		rc = bnxt_reserve_rings(bp, true);
/* Collect software IDs of all active filters in hash table @tbl into
 * @ids, starting at index @start.  Must be called under RCU read lock.
 * Filters being deleted by firmware are skipped.
 *
 * NOTE(review): the head assignment, the skip condition's first clause
 * and the return statement are missing from this view; code left
 * untouched.
 */
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;
		hlist_for_each_entry_rcu(fltr, head, hash) {
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
			ids[j++] = fltr->sw_id;
/* Look up the filter with software ID @id in hash table @tbl.  Must be
 * called under RCU read lock.  Returns the matching filter base, or
 * NULL if not found (return paths are outside this view).
 */
static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;
		hlist_for_each_entry_rcu(fltr, head, hash) {
			/* flags == 0 means a slot not in active use. */
			if (fltr->flags && fltr->sw_id == id)
/* ETHTOOL_GRXCLSRLALL: report all L2 and ntuple filter IDs in
 * @rule_locs, with the total filter capacity in cmd->data.  L2 filter
 * IDs are collected first, then ntuple IDs are appended starting at
 * that count.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
	cmd->data = bp->ntp_fltr_count;
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
/* ETHTOOL_GRXCLSRULE: translate the filter at cmd->fs.location back into
 * an ethtool_rx_flow_spec.
 *
 * L2 filters are tried first (ETHER_FLOW, with optional FLOW_EXT VLAN
 * match); otherwise the ntuple table is searched and the flow keys and
 * masks are converted into the matching v4/v6 TCP/UDP/user flow spec.
 * The ring cookie encodes the destination ring, VF, or drop action.
 *
 * NOTE(review): RCU lock/unlock, several if/else headers and return
 * statements are missing from this view; code left untouched.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	if (fs->location >= bp->max_fltr)
	/* Try the L2 (MAC/VLAN) filter table first. */
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE,
		struct ethhdr *h_ether = &fs->h_u.ether_spec;
		struct ethhdr *m_ether = &fs->m_u.ether_spec;
		struct bnxt_l2_filter *l2_fltr;
		struct bnxt_l2_key *l2_key;
		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		l2_key = &l2_fltr->l2_key;
		fs->flow_type = ETHER_FLOW;
		ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
		/* L2 filters always match the full destination MAC. */
		eth_broadcast_addr(m_ether->h_dest);
			struct ethtool_flow_ext *m_ext = &fs->m_ext;
			struct ethtool_flow_ext *h_ext = &fs->h_ext;
			fs->flow_type |= FLOW_EXT;
			m_ext->vlan_tci = htons(0xfff);
			h_ext->vlan_tci = htons(l2_key->vlan);
		if (fltr_base->flags & BNXT_ACT_RING_DST)
			fs->ring_cookie = fltr_base->rxq;
		if (fltr_base->flags & BNXT_ACT_FUNC_DST)
			/* VF destinations are encoded in the upper cookie bits. */
			fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
					  ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
	/* Not an L2 filter; search the ntuple table. */
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	fkeys = &fltr->fkeys;
	fmasks = &fltr->fmasks;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
		    fkeys->basic.ip_proto == IPPROTO_RAW) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			if (fkeys->basic.ip_proto == IPPROTO_ICMP)
				fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
				fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
			fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V4_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V4_FLOW;
		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
		/* Ports only apply to TCP/UDP flow types. */
		if (fs->flow_type == TCP_V4_FLOW ||
		    fs->flow_type == UDP_V4_FLOW) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
		/* IPv6 variants mirror the IPv4 handling above. */
		if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
		    fkeys->basic.ip_proto == IPPROTO_RAW) {
			fs->flow_type = IPV6_USER_FLOW;
			if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
				fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
				fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
			fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V6_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V6_FLOW;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
			fmasks->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
			fmasks->addrs.v6addrs.dst;
		if (fs->flow_type == TCP_V6_FLOW ||
		    fs->flow_type == UDP_V6_FLOW) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
	/* Destination: explicit drop or the filter's RX queue. */
	if (fltr->base.flags & BNXT_ACT_DROP)
		fs->ring_cookie = RX_CLS_FLOW_DISC;
		fs->ring_cookie = fltr->base.rxq;
/* Map an ethtool RSS context index to the driver-private bnxt_rss_ctx.
 * The core stores contexts in dev->ethtool->rss_ctx (an xarray); the
 * driver private area lives inside the ethtool_rxfh_context.
 * NOTE(review): this listing is elided — the NULL check on ctx is not
 * visible here.
 */
1208 static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
1211 	struct ethtool_rxfh_context *ctx;
1213 	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
1216 	return ethtool_rxfh_context_priv(ctx);
/* Allocate one DMA-coherent buffer for a VNIC that holds the RSS
 * indirection table followed immediately by the RSS hash key.  The key
 * half starts at a cache-aligned offset (size) past the table; both CPU
 * and DMA addresses of each half are recorded in the vnic.
 */
1219 static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
1220 				     struct bnxt_vnic_info *vnic)
1222 	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
1224 	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
1225 	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
1226 					     vnic->rss_table_size,
1227 					     &vnic->rss_table_dma_addr,
1229 	if (!vnic->rss_table)
1232 	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
1233 	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
/* Insert an L2 (MAC/VLAN) classification rule from an ethtool flow spec.
 * Only exact (fully-masked) destination MAC matches are accepted, with an
 * optional exact VLAN TCI match via FLOW_EXT.  The ring cookie selects
 * either a destination VF or a destination RX ring/VNIC.  Not supported
 * on P5+ chips (rejected up front).
 */
1237 static int bnxt_add_l2_cls_rule(struct bnxt *bp,
1238 				struct ethtool_rx_flow_spec *fs)
1240 	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
1241 	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1242 	struct ethhdr *h_ether = &fs->h_u.ether_spec;
1243 	struct ethhdr *m_ether = &fs->m_u.ether_spec;
1244 	struct bnxt_l2_filter *fltr;
1245 	struct bnxt_l2_key key;
1250 	if (BNXT_CHIP_P5_PLUS(bp))
1253 	if (!is_broadcast_ether_addr(m_ether->h_dest))
1255 	ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
1257 	if (fs->flow_type & FLOW_EXT) {
1258 		struct ethtool_flow_ext *m_ext = &fs->m_ext;
1259 		struct ethtool_flow_ext *h_ext = &fs->h_ext;
		/* VLAN match must be a full 12-bit TCI mask with a non-zero value */
1261 		if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
1263 		key.vlan = ntohs(h_ext->vlan_tci);
1267 		flags = BNXT_ACT_FUNC_DST;
1271 		flags = BNXT_ACT_RING_DST;
		/* VNIC 0 is the default; ring n maps to vnic_info[n + 1] */
1272 		vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
1274 	fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
1276 		return PTR_ERR(fltr);
1278 	fltr->base.fw_vnic_id = vnic_id;
1279 	fltr->base.rxq = ring;
1280 	fltr->base.vf_idx = vf;
	/* Program the filter into HW; drop the SW filter again on failure */
1281 	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
1283 		bnxt_del_l2_filter(bp, fltr);
	/* Report the assigned SW id back to ethtool as the rule location */
1285 	fs->location = fltr->base.sw_id;
/* Validate an IPv4 "user" flow spec for ntuple insertion: l4 bytes and
 * tos matching are unsupported, the IP version must be v4, the protocol
 * must be fully masked, and only RAW or ICMP protocols are accepted.
 * Returns false when any of these constraints is violated.
 */
1289 static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
1290 					struct ethtool_usrip4_spec *ip_mask)
1292 	if (ip_mask->l4_4_bytes || ip_mask->tos ||
1293 	    ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
1294 	    ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
1295 	    (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
/* IPv6 counterpart of bnxt_verify_ntuple_ip4_flow(): l4 bytes and
 * traffic-class matching are unsupported, the L4 protocol must be fully
 * masked, and only RAW or ICMPv6 protocols are accepted.
 */
1300 static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
1301 					struct ethtool_usrip6_spec *ip_mask)
1303 	if (ip_mask->l4_4_bytes || ip_mask->tclass ||
1304 	    ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
1305 	    (ip_spec->l4_proto != IPPROTO_RAW &&
1306 	     ip_spec->l4_proto != IPPROTO_ICMPV6))
/* Insert an n-tuple (IP/TCP/UDP) classification rule from an ethtool
 * flow spec.  Builds a bnxt_ntuple_filter keyed on the flow fields and
 * masks, attaches it to the default VNIC's L2 filter, optionally binds
 * it to an RSS context (FLOW_RSS) or marks it as a drop rule, and then
 * programs it into HW.  Rules added this way never age out
 * (BNXT_ACT_NO_AGING).
 */
1311 static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
1312 				    struct ethtool_rxnfc *cmd)
1314 	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1315 	struct bnxt_ntuple_filter *new_fltr, *fltr;
1316 	u32 flow_type = fs->flow_type & 0xff;
1317 	struct bnxt_l2_filter *l2_fltr;
1318 	struct bnxt_flow_masks *fmasks;
1319 	struct flow_keys *fkeys;
1327 	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1328 	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	/* Extended matches and VF destinations are not supported for ntuple */
1329 	if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
1332 	if (flow_type == IP_USER_FLOW) {
1333 		if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
1334 						 &fs->m_u.usr_ip4_spec))
1338 	if (flow_type == IPV6_USER_FLOW) {
1339 		if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
1340 						 &fs->m_u.usr_ip6_spec))
1344 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	/* Reuse the default VNIC's primary L2 filter; take a reference */
1348 	l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
1349 	atomic_inc(&l2_fltr->refcnt);
1350 	new_fltr->l2_fltr = l2_fltr;
1351 	fmasks = &new_fltr->fmasks;
1352 	fkeys = &new_fltr->fkeys;
	/* Copy match keys and masks out of the ethtool spec per flow type */
1355 	switch (flow_type) {
1356 	case IP_USER_FLOW: {
1357 		struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
1358 		struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
1360 		fkeys->basic.ip_proto = ip_spec->proto;
1361 		fkeys->basic.n_proto = htons(ETH_P_IP);
1362 		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
1363 		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
1364 		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
1365 		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
1370 		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
1371 		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
1373 		fkeys->basic.ip_proto = IPPROTO_TCP;
1374 		if (flow_type == UDP_V4_FLOW)
1375 			fkeys->basic.ip_proto = IPPROTO_UDP;
1376 		fkeys->basic.n_proto = htons(ETH_P_IP);
1377 		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
1378 		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
1379 		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
1380 		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
1381 		fkeys->ports.src = ip_spec->psrc;
1382 		fmasks->ports.src = ip_mask->psrc;
1383 		fkeys->ports.dst = ip_spec->pdst;
1384 		fmasks->ports.dst = ip_mask->pdst;
1387 	case IPV6_USER_FLOW: {
1388 		struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
1389 		struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
1391 		fkeys->basic.ip_proto = ip_spec->l4_proto;
1392 		fkeys->basic.n_proto = htons(ETH_P_IPV6);
1393 		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
1394 		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
1395 		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
1396 		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
1401 		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
1402 		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
1404 		fkeys->basic.ip_proto = IPPROTO_TCP;
1405 		if (flow_type == UDP_V6_FLOW)
1406 			fkeys->basic.ip_proto = IPPROTO_UDP;
1407 		fkeys->basic.n_proto = htons(ETH_P_IPV6);
1409 		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
1410 		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
1411 		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
1412 		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
1413 		fkeys->ports.src = ip_spec->psrc;
1414 		fmasks->ports.src = ip_mask->psrc;
1415 		fkeys->ports.dst = ip_spec->pdst;
1416 		fmasks->ports.dst = ip_mask->pdst;
	/* An all-zero mask would match everything; reject it */
1423 	if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
	/* Hash the keys and check for an already-existing identical filter */
1426 	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
1428 	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
1436 	new_fltr->base.flags = BNXT_ACT_NO_AGING;
1437 	if (fs->flow_type & FLOW_RSS) {
1438 		struct bnxt_rss_ctx *rss_ctx;
1440 		new_fltr->base.fw_vnic_id = 0;
1441 		new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
1442 		rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
1444 			new_fltr->base.fw_vnic_id = rss_ctx->index;
1450 	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
1451 		new_fltr->base.flags |= BNXT_ACT_DROP;
1453 		new_fltr->base.rxq = ring;
1454 	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
1455 	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
1457 		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
1459 			bnxt_del_ntp_filter(bp, new_fltr);
1462 		fs->location = new_fltr->base.sw_id;
	/* Error path: release the L2 filter reference taken above */
1467 	atomic_dec(&l2_fltr->refcnt);
/* ETHTOOL_SRXCLSRLINS handler: validate the flow spec and destination,
 * then dispatch to the L2 or ntuple insertion path.  Requires the device
 * to be up with RFS enabled, and only driver-assigned rule locations
 * (RX_CLS_LOC_ANY) are accepted.
 */
1472 static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1474 	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1475 	u32 ring, flow_type;
1479 	if (!netif_running(bp->dev))
1481 	if (!(bp->flags & BNXT_FLAG_RFS))
1483 	if (fs->location != RX_CLS_LOC_ANY)
1486 	flow_type = fs->flow_type;
	/* User (raw IP) flows need FW support for extended IP protocol match */
1487 	if ((flow_type == IP_USER_FLOW ||
1488 	     flow_type == IPV6_USER_FLOW) &&
1489 	    !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
1491 	if (flow_type & FLOW_MAC_EXT)
1493 	flow_type &= ~FLOW_EXT;
	/* Drop rules don't need ring validation (except for pure L2 rules) */
1495 	if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
1496 		return bnxt_add_ntuple_cls_rule(bp, cmd);
1498 	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
1499 	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1500 	if (BNXT_VF(bp) && vf)
1502 	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
1504 	if (!vf && ring >= bp->rx_nr_rings)
1507 	if (flow_type == ETHER_FLOW)
1508 		rc = bnxt_add_l2_cls_rule(bp, fs);
1510 		rc = bnxt_add_ntuple_cls_rule(bp, cmd);
/* ETHTOOL_SRXCLSRLDEL handler: look up the rule by its sw_id first in
 * the L2 filter hash table, then in the ntuple table, and free both the
 * HW filter and the SW state.  Aged (non-NO_AGING) ntuple filters are
 * not user-deletable.
 */
1514 static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1516 	struct ethtool_rx_flow_spec *fs = &cmd->fs;
1517 	struct bnxt_filter_base *fltr_base;
1518 	struct bnxt_ntuple_filter *fltr;
1519 	u32 id = fs->location;
1522 	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
1523 					  BNXT_L2_FLTR_HASH_SIZE, id);
1525 		struct bnxt_l2_filter *l2_fltr;
1527 		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
1529 		bnxt_hwrm_l2_filter_free(bp, l2_fltr);
1530 		bnxt_del_l2_filter(bp, l2_fltr);
1533 	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
1534 					  BNXT_NTP_FLTR_HASH_SIZE, id);
1540 	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
1541 	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
1546 	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
1547 	bnxt_del_ntp_filter(bp, fltr);
/* Return the ethtool RXH bits implied by the FW IPv4 2-tuple hash type
 * (src + dst IP), or nothing if that hash type is disabled.
 */
1551 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1553 	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1554 		return RXH_IP_SRC | RXH_IP_DST;
/* IPv6 counterpart of get_ethtool_ipv4_rss(). */
1558 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1560 	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1561 		return RXH_IP_SRC | RXH_IP_DST;
/* ETHTOOL_GRXFH handler: report which header fields are hashed for the
 * requested flow type by translating the FW rss_hash_cfg bits into
 * ethtool RXH_* bits in cmd->data.  4-tuple types (TCP/UDP/AH-ESP) may
 * also fall back to the generic IPv4/IPv6 2-tuple setting.
 */
1565 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1568 	switch (cmd->flow_type) {
1570 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1571 			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1572 				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1573 		cmd->data |= get_ethtool_ipv4_rss(bp);
1576 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1577 			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1578 				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1580 	case AH_ESP_V4_FLOW:
1581 		if (bp->rss_hash_cfg &
1582 		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
1583 		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
1584 			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1585 				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1591 		cmd->data |= get_ethtool_ipv4_rss(bp);
1595 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1596 			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1597 				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1598 		cmd->data |= get_ethtool_ipv6_rss(bp);
1601 		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1602 			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1603 				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1605 	case AH_ESP_V6_FLOW:
1606 		if (bp->rss_hash_cfg &
1607 		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
1608 		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
1609 			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1610 				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1616 		cmd->data |= get_ethtool_ipv6_rss(bp);
/* Only two hash-field sets are supported per flow type: full 4-tuple
 * (addresses + ports/SPI) or 2-tuple (addresses only).
 */
1622 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1623 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
/* ETHTOOL_SRXFH handler: translate the requested RXH_* field set for
 * one flow type into the FW rss_hash_cfg bit mask.  4-tuple hashing for
 * UDP and AH/ESP requires the corresponding capability bits.  If the
 * configuration changed, the NIC is closed and reopened to apply it
 * (unless FW supports delta updates, tracked via rss_hash_delta).
 */
1625 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1627 	u32 rss_hash_cfg = bp->rss_hash_cfg;
1630 	if (cmd->data == RXH_4TUPLE)
1632 	else if (cmd->data == RXH_2TUPLE)
1634 	else if (!cmd->data)
1639 	if (cmd->flow_type == TCP_V4_FLOW) {
1640 		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1642 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1643 	} else if (cmd->flow_type == UDP_V4_FLOW) {
1644 		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
1646 		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1648 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1649 	} else if (cmd->flow_type == TCP_V6_FLOW) {
1650 		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1652 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1653 	} else if (cmd->flow_type == UDP_V6_FLOW) {
1654 		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
1656 		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1658 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1659 	} else if (cmd->flow_type == AH_ESP_V4_FLOW) {
		/* AH and ESP SPI hashing are enabled/disabled as a pair */
1660 		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
1661 				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
1663 		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
1664 				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
1666 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
1667 					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
1668 	} else if (cmd->flow_type == AH_ESP_V6_FLOW) {
1669 		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
1670 				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
1672 		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
1673 				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
1675 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
1676 					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
1677 	} else if (tuple == 4) {
	/* 2-tuple / disabled cases for the remaining flow types */
1681 	switch (cmd->flow_type) {
1685 	case AH_ESP_V4_FLOW:
1690 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1692 			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1698 	case AH_ESP_V6_FLOW:
1703 			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1705 			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1709 	if (bp->rss_hash_cfg == rss_hash_cfg)
1712 	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
1713 		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
1714 	bp->rss_hash_cfg = rss_hash_cfg;
1715 	if (netif_running(bp->dev)) {
1716 		bnxt_close_nic(bp, false, false);
1717 		rc = bnxt_open_nic(bp, false, false);
/* .get_rxnfc ethtool op: dispatch the GRX* sub-commands (ring count,
 * rule count, rule dump, single rule, hash fields) to their helpers.
 */
1722 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1725 	struct bnxt *bp = netdev_priv(dev);
1729 	case ETHTOOL_GRXRINGS:
1730 		cmd->data = bp->rx_nr_rings;
1733 	case ETHTOOL_GRXCLSRLCNT:
1734 		cmd->rule_cnt = bp->ntp_fltr_count;
		/* RX_CLS_LOC_SPECIAL advertises driver-chosen rule locations */
1735 		cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
1738 	case ETHTOOL_GRXCLSRLALL:
1739 		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1742 	case ETHTOOL_GRXCLSRULE:
1743 		rc = bnxt_grxclsrule(bp, cmd);
1747 		rc = bnxt_grxfh(bp, cmd);
/* .set_rxnfc ethtool op: dispatch SRXFH (hash fields) and rule
 * insert/delete sub-commands to their helpers.
 */
1758 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1760 	struct bnxt *bp = netdev_priv(dev);
1765 		rc = bnxt_srxfh(bp, cmd);
1768 	case ETHTOOL_SRXCLSRLINS:
1769 		rc = bnxt_srxclsrlins(bp, cmd);
1772 	case ETHTOOL_SRXCLSRLDEL:
1773 		rc = bnxt_srxclsrldel(bp, cmd);
/* Size of the RSS indirection table exposed to ethtool: per-ring-group
 * tables on P5+ chips, a fixed HW table size otherwise.
 */
1783 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1785 	struct bnxt *bp = netdev_priv(dev);
1787 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1788 		return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
1789 		       BNXT_RSS_TABLE_ENTRIES_P5;
1790 	return HW_HASH_INDEX_SIZE;
/* RSS hash key length reported to ethtool (fixed HW key size). */
1793 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1795 	return HW_HASH_KEY_SIZE;
/* .get_rxfh ethtool op: report the hash function (always Toeplitz),
 * indirection table, and hash key — either for the default VNIC or, when
 * rxfh->rss_context is set, for that additional RSS context.
 */
1798 static int bnxt_get_rxfh(struct net_device *dev,
1799 			 struct ethtool_rxfh_param *rxfh)
1801 	struct bnxt_rss_ctx *rss_ctx = NULL;
1802 	struct bnxt *bp = netdev_priv(dev);
1803 	u32 *indir_tbl = bp->rss_indir_tbl;
1804 	struct bnxt_vnic_info *vnic;
1807 	rxfh->hfunc = ETH_RSS_HASH_TOP;
1812 	vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
1813 	if (rxfh->rss_context) {
1814 		struct ethtool_rxfh_context *ctx;
1816 		ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
		/* Use the context's own indir table, private data and VNIC */
1819 		indir_tbl = ethtool_rxfh_context_indir(ctx);
1820 		rss_ctx = ethtool_rxfh_context_priv(ctx);
1821 		vnic = &rss_ctx->vnic;
1824 	if (rxfh->indir && indir_tbl) {
1825 		tbl_size = bnxt_get_rxfh_indir_size(dev);
1826 		for (i = 0; i < tbl_size; i++)
1827 			rxfh->indir[i] = indir_tbl[i];
1830 	if (rxfh->key && vnic->rss_hash_key)
1831 		memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
/* Copy a new hash key and/or indirection table from the ethtool request
 * into either an additional RSS context (ctx/rss_ctx non-NULL) or the
 * device defaults.  For the default table, entries past the exposed size
 * are zero-padded up to rss_indir_tbl_entries.
 */
1836 static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
1837 			    struct bnxt_rss_ctx *rss_ctx,
1838 			    const struct ethtool_rxfh_param *rxfh)
1842 			memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
1845 			memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
1846 			bp->rss_hash_key_updated = true;
1850 		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
1851 		u32 *indir_tbl = bp->rss_indir_tbl;
1854 			indir_tbl = ethtool_rxfh_context_indir(ctx);
1855 		for (i = 0; i < tbl_size; i++)
1856 			indir_tbl[i] = rxfh->indir[i];
1857 		pad = bp->rss_indir_tbl_entries - tbl_size;
1859 			memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
/* Common validation for RSS-context create/modify: only the Toeplitz
 * hash function is supported, the chip must support multiple RSS
 * contexts, and the interface must be up.
 */
1863 static int bnxt_rxfh_context_check(struct bnxt *bp,
1864 				   const struct ethtool_rxfh_param *rxfh,
1865 				   struct netlink_ext_ack *extack)
1867 	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1868 		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1872 	if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
1873 		NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
1877 	if (!netif_running(bp->dev)) {
1878 		NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
1885 static int bnxt_create_rxfh_context(struct net_device *dev,
1886 struct ethtool_rxfh_context *ctx,
1887 const struct ethtool_rxfh_param *rxfh,
1888 struct netlink_ext_ack *extack)
1890 struct bnxt *bp = netdev_priv(dev);
1891 struct bnxt_rss_ctx *rss_ctx;
1892 struct bnxt_vnic_info *vnic;
1895 rc = bnxt_rxfh_context_check(bp, rxfh, extack);
1899 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
1900 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
1901 BNXT_MAX_ETH_RSS_CTX);
1905 if (!bnxt_rfs_capable(bp, true)) {
1906 NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
1910 rss_ctx = ethtool_rxfh_context_priv(ctx);
1914 vnic = &rss_ctx->vnic;
1915 vnic->rss_ctx = ctx;
1916 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
1917 vnic->vnic_id = BNXT_VNIC_ID_INVALID;
1918 rc = bnxt_alloc_vnic_rss_table(bp, vnic);
1922 /* Populate defaults in the context */
1923 bnxt_set_dflt_rss_indir_tbl(bp, ctx);
1924 ctx->hfunc = ETH_RSS_HASH_TOP;
1925 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
1926 memcpy(ethtool_rxfh_context_key(ctx),
1927 bp->rss_hash_key, HW_HASH_KEY_SIZE);
1929 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
1931 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
1935 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
1937 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
1940 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
1942 rc = __bnxt_setup_vnic_p5(bp, vnic);
1944 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
1948 rss_ctx->index = rxfh->rss_context;
1951 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
/* .modify_rxfh_context ethtool op: validate the request, update the
 * context's key/indirection table, and reprogram the context's VNIC
 * RSS configuration in firmware.
 */
1955 static int bnxt_modify_rxfh_context(struct net_device *dev,
1956 				    struct ethtool_rxfh_context *ctx,
1957 				    const struct ethtool_rxfh_param *rxfh,
1958 				    struct netlink_ext_ack *extack)
1960 	struct bnxt *bp = netdev_priv(dev);
1961 	struct bnxt_rss_ctx *rss_ctx;
1964 	rc = bnxt_rxfh_context_check(bp, rxfh, extack);
1968 	rss_ctx = ethtool_rxfh_context_priv(ctx);
1970 	bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
1972 	return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
/* .remove_rxfh_context ethtool op: tear down the driver state (VNIC,
 * filters, DMA table) backing an additional RSS context.
 */
1975 static int bnxt_remove_rxfh_context(struct net_device *dev,
1976 				    struct ethtool_rxfh_context *ctx,
1978 				    struct netlink_ext_ack *extack)
1980 	struct bnxt *bp = netdev_priv(dev);
1981 	struct bnxt_rss_ctx *rss_ctx;
1983 	rss_ctx = ethtool_rxfh_context_priv(ctx);
1985 	bnxt_del_one_rss_ctx(bp, rss_ctx, true);
/* .set_rxfh ethtool op (default context): only Toeplitz is accepted;
 * stores the new key/indirection table and bounces the NIC to apply it.
 */
1989 static int bnxt_set_rxfh(struct net_device *dev,
1990 			 struct ethtool_rxfh_param *rxfh,
1991 			 struct netlink_ext_ack *extack)
1993 	struct bnxt *bp = netdev_priv(dev);
1996 	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
1999 	bnxt_modify_rss(bp, NULL, NULL, rxfh);
2001 	if (netif_running(bp->dev)) {
2002 		bnxt_close_nic(bp, false, false);
2003 		rc = bnxt_open_nic(bp, false, false);
/* .get_drvinfo ethtool op: report driver name, FW version string, PCI
 * bus info, and the stats/selftest counts.  EEPROM/register dump sizes
 * are reported elsewhere (see the TODO comments below).
 */
2008 static void bnxt_get_drvinfo(struct net_device *dev,
2009 			     struct ethtool_drvinfo *info)
2011 	struct bnxt *bp = netdev_priv(dev);
2013 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
2014 	strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
2015 	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
2016 	info->n_stats = bnxt_get_num_stats(bp);
2017 	info->testinfo_len = bp->num_tests;
2018 	/* TODO CHIMP_FW: eeprom dump details */
2019 	info->eedump_len = 0;
2020 	/* TODO CHIMP FW: reg dump details */
2021 	info->regdump_len = 0;
/* .get_regs_len ethtool op: size of the register dump — the PXP
 * register block plus, when FW supports it, the PCIe statistics block
 * appended by bnxt_get_regs().
 */
2024 static int bnxt_get_regs_len(struct net_device *dev)
2026 	struct bnxt *bp = netdev_priv(dev);
2032 	reg_len = BNXT_PXP_REG_LEN;
2034 	if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
2035 		reg_len += sizeof(struct pcie_ctx_hw_stats);
/* .get_regs ethtool op: dump the PXP registers into the caller's buffer,
 * then (if supported) issue HWRM_PCIE_QSTATS into a DMA slice and append
 * the PCIe stats, converting each 64-bit counter from LE to host order.
 */
2040 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2043 	struct pcie_ctx_hw_stats *hw_pcie_stats;
2044 	struct hwrm_pcie_qstats_input *req;
2045 	struct bnxt *bp = netdev_priv(dev);
2046 	dma_addr_t hw_pcie_stats_addr;
2050 	bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
2052 	if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
2055 	if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
2058 	hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
2059 					   &hw_pcie_stats_addr);
2060 	if (!hw_pcie_stats) {
2061 		hwrm_req_drop(bp, req);
2066 	hwrm_req_hold(bp, req); /* hold on to slice */
2067 	req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
2068 	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
2069 	rc = hwrm_req_send(bp, req);
2071 		__le64 *src = (__le64 *)hw_pcie_stats;
		/* Stats go right after the PXP register block in the dump */
2072 		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
2075 		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
2076 			dst[i] = le64_to_cpu(src[i]);
2078 	hwrm_req_drop(bp, req);
/* .get_wol ethtool op: only magic-packet WoL is supported, and only
 * when the device advertises the WoL capability.
 */
2081 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2083 	struct bnxt *bp = netdev_priv(dev);
2087 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2088 	if (bp->flags & BNXT_FLAG_WOL_CAP) {
2089 		wol->supported = WAKE_MAGIC;
2091 			wol->wolopts = WAKE_MAGIC;
/* .set_wol ethtool op: enable or disable magic-packet WoL by allocating
 * or freeing the FW WoL filter.  Any other WoL mode is rejected.
 */
2095 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2097 	struct bnxt *bp = netdev_priv(dev);
2099 	if (wol->wolopts & ~WAKE_MAGIC)
2102 	if (wol->wolopts & WAKE_MAGIC) {
2103 		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
2106 		if (bnxt_hwrm_alloc_wol_fltr(bp))
2112 		if (bnxt_hwrm_free_wol_fltr(bp))
2120 /* TODO: support 25GB, 40GB, 50GB with different cable type */
/* Translate a FW speed bit mask (BNXT_LINK_SPEED_MSK_*) into an ethtool
 * linkmode bitmap.  Legacy helper used where the media-aware table
 * (bnxt_link_modes) is not applicable; assumes baseT/CR4-style modes.
 */
2121 void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
2123 	linkmode_zero(mode);
2125 	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
2126 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
2127 	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
2128 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
2129 	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
2130 		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
2131 	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
2132 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
2133 	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
2134 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
/* Physical media categories used to pick the right ethtool link mode
 * for a given speed/signalling combination (see bnxt_link_modes).
 */
2137 enum bnxt_media_type {
2138 	BNXT_MEDIA_UNKNOWN = 0,
2142 	BNXT_MEDIA_LR_ER_FR,
/* FW PHY type -> media category lookup table, indexed by the
 * PORT_PHY_QCFG response phy_type.  Unlisted entries default to
 * BNXT_MEDIA_UNKNOWN (0).
 */
2149 static const enum bnxt_media_type bnxt_phy_types[] = {
2150 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
2151 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
2152 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
2153 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
2154 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
2155 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
2156 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
2157 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
2158 	[PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
2159 	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
2160 	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
2161 	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
2162 	[PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
2163 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
2164 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
2165 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2166 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2167 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
2168 	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
2169 	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
2170 	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2171 	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2172 	[PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
2173 	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
2174 	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
2175 	[PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
2176 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
2177 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
2178 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2179 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2180 	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
2181 	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
2182 	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
2183 	[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
2184 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
2185 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
2186 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
2187 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
2188 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
2189 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
2190 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
2191 	[PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
2192 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
2193 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
2194 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
2195 	[PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
2196 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
2197 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
2198 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
2199 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
2200 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
2201 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
2202 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2203 	[PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
/* Determine the media category for the current link: the FW media_type
 * field is authoritative for TP and DAC; otherwise fall back to the
 * per-PHY-type lookup table.
 */
2206 static enum bnxt_media_type
2207 bnxt_get_media(struct bnxt_link_info *link_info)
2209 	switch (link_info->media_type) {
2210 	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
2211 		return BNXT_MEDIA_TP;
2212 	case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
2213 		return BNXT_MEDIA_CR;
2215 		if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
2216 			return bnxt_phy_types[link_info->phy_type];
2217 		return BNXT_MEDIA_UNKNOWN;
/* Compact speed indices used as the first dimension of the
 * bnxt_link_modes table (one entry per distinct line rate, regardless
 * of signalling mode).
 */
2221 enum bnxt_link_speed_indices {
2222 	BNXT_LINK_SPEED_UNKNOWN = 0,
2223 	BNXT_LINK_SPEED_100MB_IDX,
2224 	BNXT_LINK_SPEED_1GB_IDX,
2225 	BNXT_LINK_SPEED_10GB_IDX,
2226 	BNXT_LINK_SPEED_25GB_IDX,
2227 	BNXT_LINK_SPEED_40GB_IDX,
2228 	BNXT_LINK_SPEED_50GB_IDX,
2229 	BNXT_LINK_SPEED_100GB_IDX,
2230 	BNXT_LINK_SPEED_200GB_IDX,
2231 	BNXT_LINK_SPEED_400GB_IDX,
2232 	__BNXT_LINK_SPEED_END
/* Map a FW link speed value to its bnxt_link_speed_indices slot.
 * NRZ and the various PAM4 encodings of the same line rate collapse to
 * one index; signalling is handled separately (BNXT_SIG_MODE_*).
 */
2235 static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
2238 	case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
2239 	case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
2240 	case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
2241 	case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
2242 	case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
2243 	case BNXT_LINK_SPEED_50GB:
2244 	case BNXT_LINK_SPEED_50GB_PAM4:
2245 		return BNXT_LINK_SPEED_50GB_IDX;
2246 	case BNXT_LINK_SPEED_100GB:
2247 	case BNXT_LINK_SPEED_100GB_PAM4:
2248 	case BNXT_LINK_SPEED_100GB_PAM4_112:
2249 		return BNXT_LINK_SPEED_100GB_IDX;
2250 	case BNXT_LINK_SPEED_200GB:
2251 	case BNXT_LINK_SPEED_200GB_PAM4:
2252 	case BNXT_LINK_SPEED_200GB_PAM4_112:
2253 		return BNXT_LINK_SPEED_200GB_IDX;
2254 	case BNXT_LINK_SPEED_400GB:
2255 	case BNXT_LINK_SPEED_400GB_PAM4:
2256 	case BNXT_LINK_SPEED_400GB_PAM4_112:
2257 		return BNXT_LINK_SPEED_400GB_IDX;
2258 	default: return BNXT_LINK_SPEED_UNKNOWN;
/* [speed][signal mode][media] -> ethtool link mode bit.  Unpopulated
 * entries are 0, which doubles as "unknown" (see the note at the lookup
 * site in bnxt_get_link_mode).
 */
2262 static const enum ethtool_link_mode_bit_indices
2263 bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
2264 	[BNXT_LINK_SPEED_100MB_IDX] = {
2266 		[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2269 	[BNXT_LINK_SPEED_1GB_IDX] = {
2271 		[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
2272 		/* historically baseT, but DAC is more correctly baseX */
2273 		[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
2274 		[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2275 		[BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
2276 		[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2279 	[BNXT_LINK_SPEED_10GB_IDX] = {
2281 		[BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2282 		[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
2283 		[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
2284 		[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
2285 		[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2286 		[BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2289 	[BNXT_LINK_SPEED_25GB_IDX] = {
2291 		[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2292 		[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2293 		[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2296 	[BNXT_LINK_SPEED_40GB_IDX] = {
2298 		[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2299 		[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2300 		[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2301 		[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2304 	[BNXT_LINK_SPEED_50GB_IDX] = {
2305 		[BNXT_SIG_MODE_NRZ] = {
2306 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2307 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2308 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2310 		[BNXT_SIG_MODE_PAM4] = {
2311 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
2312 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
2313 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
2314 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
2317 	[BNXT_LINK_SPEED_100GB_IDX] = {
2318 		[BNXT_SIG_MODE_NRZ] = {
2319 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2320 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2321 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2322 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2324 		[BNXT_SIG_MODE_PAM4] = {
2325 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
2326 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
2327 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
2328 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
2330 		[BNXT_SIG_MODE_PAM4_112] = {
2331 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
2332 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
2333 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
2334 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
2337 	[BNXT_LINK_SPEED_200GB_IDX] = {
2338 		[BNXT_SIG_MODE_PAM4] = {
2339 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
2340 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
2341 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
2342 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
2344 		[BNXT_SIG_MODE_PAM4_112] = {
2345 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
2346 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
2347 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
2348 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
2351 	[BNXT_LINK_SPEED_400GB_IDX] = {
2352 		[BNXT_SIG_MODE_PAM4] = {
2353 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
2354 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
2355 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
2356 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
2358 		[BNXT_SIG_MODE_PAM4_112] = {
2359 			[BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
2360 			[BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
2361 			[BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
2362 			[BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
/* Sentinel for "no single ethtool link mode describes this link". */
2367 #define BNXT_LINK_MODE_UNKNOWN -1
/* Resolve the single ethtool link mode bit describing the current link:
 * pick the speed (actual when autonegotiated, requested when forced)
 * and signalling mode, combine with the detected media, and look it up
 * in bnxt_link_modes.  100M/1G baseT are downgraded to the half-duplex
 * bit when the link is not full duplex.
 */
2369 static enum ethtool_link_mode_bit_indices
2370 bnxt_get_link_mode(struct bnxt_link_info *link_info)
2372 	enum ethtool_link_mode_bit_indices link_mode;
2373 	enum bnxt_link_speed_indices speed;
2374 	enum bnxt_media_type media;
2377 	if (link_info->phy_link_status != BNXT_LINK_LINK)
2378 		return BNXT_LINK_MODE_UNKNOWN;
2380 	media = bnxt_get_media(link_info);
2381 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
2382 		speed = bnxt_fw_speed_idx(link_info->link_speed);
2383 		sig_mode = link_info->active_fec_sig_mode &
2384 			   PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
2386 		speed = bnxt_fw_speed_idx(link_info->req_link_speed);
2387 		sig_mode = link_info->req_signal_mode;
2389 	if (sig_mode >= BNXT_SIG_MODE_MAX)
2390 		return BNXT_LINK_MODE_UNKNOWN;
2392 	/* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2393 	 * link mode, but since no such devices exist, the zeroes in the
2394 	 * map can be conveniently used to represent unknown link modes.
2396 	link_mode = bnxt_link_modes[speed][sig_mode][media];
2398 		return BNXT_LINK_MODE_UNKNOWN;
2400 	switch (link_mode) {
2401 	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
2402 		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2403 			link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
2405 	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
2406 		if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2407 			link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
2416 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
2417 struct ethtool_link_ksettings *lk_ksettings)
2419 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2421 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
2422 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2423 lk_ksettings->link_modes.supported);
2424 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2425 lk_ksettings->link_modes.supported);
2428 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
2429 link_info->support_pam4_auto_speeds)
2430 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2431 lk_ksettings->link_modes.supported);
2433 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2436 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
2437 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2438 lk_ksettings->link_modes.advertising);
2439 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
2440 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2441 lk_ksettings->link_modes.advertising);
2442 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
2443 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2444 lk_ksettings->link_modes.lp_advertising);
2445 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
2446 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2447 lk_ksettings->link_modes.lp_advertising);
2450 static const u16 bnxt_nrz_speed_masks[] = {
2451 [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
2452 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
2453 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
2454 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
2455 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
2456 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
2457 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
2458 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2461 static const u16 bnxt_pam4_speed_masks[] = {
2462 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
2463 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
2464 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
2465 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2468 static const u16 bnxt_nrz_speeds2_masks[] = {
2469 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
2470 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
2471 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
2472 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
2473 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
2474 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
2475 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2478 static const u16 bnxt_pam4_speeds2_masks[] = {
2479 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
2480 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
2481 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
2482 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
2485 static const u16 bnxt_pam4_112_speeds2_masks[] = {
2486 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
2487 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
2488 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
2491 static enum bnxt_link_speed_indices
2492 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
2498 case BNXT_SIG_MODE_NRZ:
2499 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2500 speeds = bnxt_nrz_speeds2_masks;
2501 len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
2503 speeds = bnxt_nrz_speed_masks;
2504 len = ARRAY_SIZE(bnxt_nrz_speed_masks);
2507 case BNXT_SIG_MODE_PAM4:
2508 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2509 speeds = bnxt_pam4_speeds2_masks;
2510 len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
2512 speeds = bnxt_pam4_speed_masks;
2513 len = ARRAY_SIZE(bnxt_pam4_speed_masks);
2516 case BNXT_SIG_MODE_PAM4_112:
2517 speeds = bnxt_pam4_112_speeds2_masks;
2518 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
2521 return BNXT_LINK_SPEED_UNKNOWN;
2524 for (idx = 0; idx < len; idx++) {
2525 if (speeds[idx] == speed_msk)
2529 return BNXT_LINK_SPEED_UNKNOWN;
2532 #define BNXT_FW_SPEED_MSK_BITS 16
2535 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2536 u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2538 enum ethtool_link_mode_bit_indices link_mode;
2539 enum bnxt_link_speed_indices speed;
2542 for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
2543 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
2547 link_mode = bnxt_link_modes[speed][sig_mode][media];
2551 linkmode_set_bit(link_mode, et_mask);
2556 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2557 u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2560 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2565 /* list speeds for all media if unknown */
2566 for (media = 1; media < __BNXT_MEDIA_END; media++)
2567 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2572 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
2573 enum bnxt_media_type media,
2574 struct ethtool_link_ksettings *lk_ksettings)
2576 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2577 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2578 u16 phy_flags = bp->phy_flags;
2580 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2581 sp_nrz = link_info->support_speeds2;
2582 sp_pam4 = link_info->support_speeds2;
2583 sp_pam4_112 = link_info->support_speeds2;
2585 sp_nrz = link_info->support_speeds;
2586 sp_pam4 = link_info->support_pam4_speeds;
2588 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2589 lk_ksettings->link_modes.supported);
2590 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2591 lk_ksettings->link_modes.supported);
2592 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2593 phy_flags, lk_ksettings->link_modes.supported);
2597 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
2598 enum bnxt_media_type media,
2599 struct ethtool_link_ksettings *lk_ksettings)
2601 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2602 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2603 u16 phy_flags = bp->phy_flags;
2605 sp_nrz = link_info->advertising;
2606 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2607 sp_pam4 = link_info->advertising;
2608 sp_pam4_112 = link_info->advertising;
2610 sp_pam4 = link_info->advertising_pam4;
2612 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2613 lk_ksettings->link_modes.advertising);
2614 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2615 lk_ksettings->link_modes.advertising);
2616 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2617 phy_flags, lk_ksettings->link_modes.advertising);
2621 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2622 enum bnxt_media_type media,
2623 struct ethtool_link_ksettings *lk_ksettings)
2625 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2626 u16 phy_flags = bp->phy_flags;
2628 bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2629 BNXT_SIG_MODE_NRZ, phy_flags,
2630 lk_ksettings->link_modes.lp_advertising);
2631 bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2632 BNXT_SIG_MODE_PAM4, phy_flags,
2633 lk_ksettings->link_modes.lp_advertising);
2636 static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
2637 u16 speed_msk, const unsigned long *et_mask,
2638 enum ethtool_link_mode_bit_indices mode)
2640 bool mode_desired = linkmode_test_bit(mode, et_mask);
2645 /* enabled speeds for installed media should override */
2646 if (installed_media && mode_desired) {
2647 *speeds |= speed_msk;
2648 *delta |= speed_msk;
2652 /* many to one mapping, only allow one change per fw_speed bit */
2653 if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
2654 *speeds ^= speed_msk;
2655 *delta |= speed_msk;
2659 static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
2660 const unsigned long *et_mask)
2662 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2663 u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
2664 enum bnxt_media_type media = bnxt_get_media(link_info);
2665 u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
2666 u32 delta_pam4_112 = 0;
2671 adv = &link_info->advertising;
2672 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2673 adv_pam4 = &link_info->advertising;
2674 adv_pam4_112 = &link_info->advertising;
2675 sp_msks = bnxt_nrz_speeds2_masks;
2676 sp_pam4_msks = bnxt_pam4_speeds2_masks;
2677 sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
2679 adv_pam4 = &link_info->advertising_pam4;
2680 sp_msks = bnxt_nrz_speed_masks;
2681 sp_pam4_msks = bnxt_pam4_speed_masks;
2683 for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
2684 /* accept any legal media from user */
2685 for (m = 1; m < __BNXT_MEDIA_END; m++) {
2686 bnxt_update_speed(&delta_nrz, m == media,
2687 adv, sp_msks[i], et_mask,
2688 bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
2689 bnxt_update_speed(&delta_pam4, m == media,
2690 adv_pam4, sp_pam4_msks[i], et_mask,
2691 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
2695 bnxt_update_speed(&delta_pam4_112, m == media,
2696 adv_pam4_112, sp_pam4_112_msks[i], et_mask,
2697 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
2702 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2703 struct ethtool_link_ksettings *lk_ksettings)
2705 u16 fec_cfg = link_info->fec_cfg;
2707 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2708 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2709 lk_ksettings->link_modes.advertising);
2712 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2713 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2714 lk_ksettings->link_modes.advertising);
2715 if (fec_cfg & BNXT_FEC_ENC_RS)
2716 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2717 lk_ksettings->link_modes.advertising);
2718 if (fec_cfg & BNXT_FEC_ENC_LLRS)
2719 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2720 lk_ksettings->link_modes.advertising);
2723 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2724 struct ethtool_link_ksettings *lk_ksettings)
2726 u16 fec_cfg = link_info->fec_cfg;
2728 if (fec_cfg & BNXT_FEC_NONE) {
2729 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2730 lk_ksettings->link_modes.supported);
2733 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2734 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2735 lk_ksettings->link_modes.supported);
2736 if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2737 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2738 lk_ksettings->link_modes.supported);
2739 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2740 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2741 lk_ksettings->link_modes.supported);
2744 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2746 switch (fw_link_speed) {
2747 case BNXT_LINK_SPEED_100MB:
2749 case BNXT_LINK_SPEED_1GB:
2751 case BNXT_LINK_SPEED_2_5GB:
2753 case BNXT_LINK_SPEED_10GB:
2755 case BNXT_LINK_SPEED_20GB:
2757 case BNXT_LINK_SPEED_25GB:
2759 case BNXT_LINK_SPEED_40GB:
2761 case BNXT_LINK_SPEED_50GB:
2762 case BNXT_LINK_SPEED_50GB_PAM4:
2764 case BNXT_LINK_SPEED_100GB:
2765 case BNXT_LINK_SPEED_100GB_PAM4:
2766 case BNXT_LINK_SPEED_100GB_PAM4_112:
2767 return SPEED_100000;
2768 case BNXT_LINK_SPEED_200GB:
2769 case BNXT_LINK_SPEED_200GB_PAM4:
2770 case BNXT_LINK_SPEED_200GB_PAM4_112:
2771 return SPEED_200000;
2772 case BNXT_LINK_SPEED_400GB:
2773 case BNXT_LINK_SPEED_400GB_PAM4:
2774 case BNXT_LINK_SPEED_400GB_PAM4_112:
2775 return SPEED_400000;
2777 return SPEED_UNKNOWN;
2781 static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2782 struct bnxt_link_info *link_info)
2784 struct ethtool_link_settings *base = &lk_ksettings->base;
2786 if (link_info->link_state == BNXT_LINK_STATE_UP) {
2787 base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2788 base->duplex = DUPLEX_HALF;
2789 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2790 base->duplex = DUPLEX_FULL;
2791 lk_ksettings->lanes = link_info->active_lanes;
2792 } else if (!link_info->autoneg) {
2793 base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2794 base->duplex = DUPLEX_HALF;
2795 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2796 base->duplex = DUPLEX_FULL;
2800 static int bnxt_get_link_ksettings(struct net_device *dev,
2801 struct ethtool_link_ksettings *lk_ksettings)
2803 struct ethtool_link_settings *base = &lk_ksettings->base;
2804 enum ethtool_link_mode_bit_indices link_mode;
2805 struct bnxt *bp = netdev_priv(dev);
2806 struct bnxt_link_info *link_info;
2807 enum bnxt_media_type media;
2809 ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
2810 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
2811 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
2812 base->duplex = DUPLEX_UNKNOWN;
2813 base->speed = SPEED_UNKNOWN;
2814 link_info = &bp->link_info;
2816 mutex_lock(&bp->link_lock);
2817 bnxt_get_ethtool_modes(link_info, lk_ksettings);
2818 media = bnxt_get_media(link_info);
2819 bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
2820 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
2821 link_mode = bnxt_get_link_mode(link_info);
2822 if (link_mode != BNXT_LINK_MODE_UNKNOWN)
2823 ethtool_params_from_link_mode(lk_ksettings, link_mode);
2825 bnxt_get_default_speeds(lk_ksettings, link_info);
2827 if (link_info->autoneg) {
2828 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
2829 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2830 lk_ksettings->link_modes.advertising);
2831 base->autoneg = AUTONEG_ENABLE;
2832 bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
2833 if (link_info->phy_link_status == BNXT_LINK_LINK)
2834 bnxt_get_all_ethtool_lp_speeds(link_info, media,
2837 base->autoneg = AUTONEG_DISABLE;
2840 base->port = PORT_NONE;
2841 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
2842 base->port = PORT_TP;
2843 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2844 lk_ksettings->link_modes.supported);
2845 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2846 lk_ksettings->link_modes.advertising);
2848 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2849 lk_ksettings->link_modes.supported);
2850 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2851 lk_ksettings->link_modes.advertising);
2853 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
2854 base->port = PORT_DA;
2856 base->port = PORT_FIBRE;
2858 base->phy_address = link_info->phy_addr;
2859 mutex_unlock(&bp->link_lock);
2865 bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
2867 struct bnxt *bp = netdev_priv(dev);
2868 struct bnxt_link_info *link_info = &bp->link_info;
2869 u16 support_pam4_spds = link_info->support_pam4_speeds;
2870 u16 support_spds2 = link_info->support_speeds2;
2871 u16 support_spds = link_info->support_speeds;
2872 u8 sig_mode = BNXT_SIG_MODE_NRZ;
2873 u32 lanes_needed = 1;
2876 switch (ethtool_speed) {
2878 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
2879 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
2882 if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
2883 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
2884 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
2887 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
2888 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
2891 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
2892 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
2893 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
2896 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
2897 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
2902 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
2903 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
2904 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
2907 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
2908 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
2909 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
2914 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
2915 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
2917 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
2919 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
2920 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
2921 sig_mode = BNXT_SIG_MODE_PAM4;
2922 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
2923 fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
2924 sig_mode = BNXT_SIG_MODE_PAM4;
2928 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
2929 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
2930 lanes != 2 && lanes != 1) {
2931 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
2933 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
2934 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
2935 sig_mode = BNXT_SIG_MODE_PAM4;
2937 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
2939 fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
2940 sig_mode = BNXT_SIG_MODE_PAM4;
2942 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
2943 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
2944 sig_mode = BNXT_SIG_MODE_PAM4_112;
2948 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
2949 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
2950 sig_mode = BNXT_SIG_MODE_PAM4;
2952 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
2954 fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
2955 sig_mode = BNXT_SIG_MODE_PAM4;
2957 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
2958 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
2959 sig_mode = BNXT_SIG_MODE_PAM4_112;
2964 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
2966 fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
2967 sig_mode = BNXT_SIG_MODE_PAM4;
2969 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
2970 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
2971 sig_mode = BNXT_SIG_MODE_PAM4_112;
2978 netdev_err(dev, "unsupported speed!\n");
2982 if (lanes && lanes != lanes_needed) {
2983 netdev_err(dev, "unsupported number of lanes for speed\n");
2987 if (link_info->req_link_speed == fw_speed &&
2988 link_info->req_signal_mode == sig_mode &&
2989 link_info->autoneg == 0)
2992 link_info->req_link_speed = fw_speed;
2993 link_info->req_signal_mode = sig_mode;
2994 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
2995 link_info->autoneg = 0;
2996 link_info->advertising = 0;
2997 link_info->advertising_pam4 = 0;
3002 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
3004 u16 fw_speed_mask = 0;
3006 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
3007 linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
3008 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
3010 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
3011 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
3012 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
3014 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
3015 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
3017 if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
3018 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
3020 return fw_speed_mask;
3023 static int bnxt_set_link_ksettings(struct net_device *dev,
3024 const struct ethtool_link_ksettings *lk_ksettings)
3026 struct bnxt *bp = netdev_priv(dev);
3027 struct bnxt_link_info *link_info = &bp->link_info;
3028 const struct ethtool_link_settings *base = &lk_ksettings->base;
3029 bool set_pause = false;
3030 u32 speed, lanes = 0;
3033 if (!BNXT_PHY_CFG_ABLE(bp))
3036 mutex_lock(&bp->link_lock);
3037 if (base->autoneg == AUTONEG_ENABLE) {
3038 bnxt_set_ethtool_speeds(link_info,
3039 lk_ksettings->link_modes.advertising);
3040 link_info->autoneg |= BNXT_AUTONEG_SPEED;
3041 if (!link_info->advertising && !link_info->advertising_pam4) {
3042 link_info->advertising = link_info->support_auto_speeds;
3043 link_info->advertising_pam4 =
3044 link_info->support_pam4_auto_speeds;
3046 /* any change to autoneg will cause link change, therefore the
3047 * driver should put back the original pause setting in autoneg
3049 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
3052 u8 phy_type = link_info->phy_type;
3054 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
3055 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
3056 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
3057 netdev_err(dev, "10GBase-T devices must autoneg\n");
3059 goto set_setting_exit;
3061 if (base->duplex == DUPLEX_HALF) {
3062 netdev_err(dev, "HALF DUPLEX is not supported!\n");
3064 goto set_setting_exit;
3066 speed = base->speed;
3067 lanes = lk_ksettings->lanes;
3068 rc = bnxt_force_link_speed(dev, speed, lanes);
3070 if (rc == -EALREADY)
3072 goto set_setting_exit;
3076 if (netif_running(dev))
3077 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
3080 mutex_unlock(&bp->link_lock);
3084 static int bnxt_get_fecparam(struct net_device *dev,
3085 struct ethtool_fecparam *fec)
3087 struct bnxt *bp = netdev_priv(dev);
3088 struct bnxt_link_info *link_info;
3092 link_info = &bp->link_info;
3093 fec_cfg = link_info->fec_cfg;
3094 active_fec = link_info->active_fec_sig_mode &
3095 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
3096 if (fec_cfg & BNXT_FEC_NONE) {
3097 fec->fec = ETHTOOL_FEC_NONE;
3098 fec->active_fec = ETHTOOL_FEC_NONE;
3101 if (fec_cfg & BNXT_FEC_AUTONEG)
3102 fec->fec |= ETHTOOL_FEC_AUTO;
3103 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
3104 fec->fec |= ETHTOOL_FEC_BASER;
3105 if (fec_cfg & BNXT_FEC_ENC_RS)
3106 fec->fec |= ETHTOOL_FEC_RS;
3107 if (fec_cfg & BNXT_FEC_ENC_LLRS)
3108 fec->fec |= ETHTOOL_FEC_LLRS;
3110 switch (active_fec) {
3111 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
3112 fec->active_fec |= ETHTOOL_FEC_BASER;
3114 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
3115 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
3116 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
3117 fec->active_fec |= ETHTOOL_FEC_RS;
3119 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
3120 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
3121 fec->active_fec |= ETHTOOL_FEC_LLRS;
3123 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
3124 fec->active_fec |= ETHTOOL_FEC_OFF;
3130 static void bnxt_get_fec_stats(struct net_device *dev,
3131 struct ethtool_fec_stats *fec_stats)
3133 struct bnxt *bp = netdev_priv(dev);
3136 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
3139 rx = bp->rx_port_stats_ext.sw_stats;
3140 fec_stats->corrected_bits.total =
3141 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
3143 if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
3146 fec_stats->corrected_blocks.total =
3147 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
3148 fec_stats->uncorrectable_blocks.total =
3149 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
3152 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
3155 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
3157 if (fec & ETHTOOL_FEC_BASER)
3158 fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
3159 else if (fec & ETHTOOL_FEC_RS)
3160 fw_fec |= BNXT_FEC_RS_ON(link_info);
3161 else if (fec & ETHTOOL_FEC_LLRS)
3162 fw_fec |= BNXT_FEC_LLRS_ON;
3166 static int bnxt_set_fecparam(struct net_device *dev,
3167 struct ethtool_fecparam *fecparam)
3169 struct hwrm_port_phy_cfg_input *req;
3170 struct bnxt *bp = netdev_priv(dev);
3171 struct bnxt_link_info *link_info;
3172 u32 new_cfg, fec = fecparam->fec;
3176 link_info = &bp->link_info;
3177 fec_cfg = link_info->fec_cfg;
3178 if (fec_cfg & BNXT_FEC_NONE)
3181 if (fec & ETHTOOL_FEC_OFF) {
3182 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
3183 BNXT_FEC_ALL_OFF(link_info);
3186 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
3187 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
3188 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
3189 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
3192 if (fec & ETHTOOL_FEC_AUTO) {
3193 if (!link_info->autoneg)
3195 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
3197 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
3201 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
3204 req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
3205 rc = hwrm_req_send(bp, req);
3206 /* update current settings */
3208 mutex_lock(&bp->link_lock);
3209 bnxt_update_link(bp, false);
3210 mutex_unlock(&bp->link_lock);
3215 static void bnxt_get_pauseparam(struct net_device *dev,
3216 struct ethtool_pauseparam *epause)
3218 struct bnxt *bp = netdev_priv(dev);
3219 struct bnxt_link_info *link_info = &bp->link_info;
3223 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
3224 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
3225 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
3228 static void bnxt_get_pause_stats(struct net_device *dev,
3229 struct ethtool_pause_stats *epstat)
3231 struct bnxt *bp = netdev_priv(dev);
3234 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
3237 rx = bp->port_stats.sw_stats;
3238 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3240 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
3241 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
3244 static int bnxt_set_pauseparam(struct net_device *dev,
3245 struct ethtool_pauseparam *epause)
3248 struct bnxt *bp = netdev_priv(dev);
3249 struct bnxt_link_info *link_info = &bp->link_info;
3251 if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
3254 mutex_lock(&bp->link_lock);
3255 if (epause->autoneg) {
3256 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
3261 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
3262 link_info->req_flow_ctrl = 0;
3264 /* when transition from auto pause to force pause,
3265 * force a link change
3267 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
3268 link_info->force_link_chng = true;
3269 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
3270 link_info->req_flow_ctrl = 0;
3272 if (epause->rx_pause)
3273 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
3275 if (epause->tx_pause)
3276 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
3278 if (netif_running(dev))
3279 rc = bnxt_hwrm_set_pause(bp);
3282 mutex_unlock(&bp->link_lock);
3286 static u32 bnxt_get_link(struct net_device *dev)
3288 struct bnxt *bp = netdev_priv(dev);
3290 /* TODO: handle MF, VF, driver close case */
3291 return BNXT_LINK_IS_UP(bp);
3294 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
3295 struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
3297 struct hwrm_nvm_get_dev_info_output *resp;
3298 struct hwrm_nvm_get_dev_info_input *req;
3304 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
3308 resp = hwrm_req_hold(bp, req);
3309 rc = hwrm_req_send(bp, req);
3311 memcpy(nvm_dev_info, resp, sizeof(*resp));
3312 hwrm_req_drop(bp, req);
3316 static void bnxt_print_admin_err(struct bnxt *bp)
3318 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
3321 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
3322 u16 ext, u16 *index, u32 *item_length,
3325 int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
3326 u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
3327 u32 dir_item_len, const u8 *data,
3330 struct bnxt *bp = netdev_priv(dev);
3331 struct hwrm_nvm_write_input *req;
3334 rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
3338 if (data_len && data) {
3339 dma_addr_t dma_handle;
3342 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
3344 hwrm_req_drop(bp, req);
3348 req->dir_data_length = cpu_to_le32(data_len);
3350 memcpy(kmem, data, data_len);
3351 req->host_src_addr = cpu_to_le64(dma_handle);
3354 hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
3355 req->dir_type = cpu_to_le16(dir_type);
3356 req->dir_ordinal = cpu_to_le16(dir_ordinal);
3357 req->dir_ext = cpu_to_le16(dir_ext);
3358 req->dir_attr = cpu_to_le16(dir_attr);
3359 req->dir_item_length = cpu_to_le32(dir_item_len);
3360 rc = hwrm_req_send(bp, req);
3363 bnxt_print_admin_err(bp);
3367 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
3368 u8 self_reset, u8 flags)
3370 struct bnxt *bp = netdev_priv(dev);
3371 struct hwrm_fw_reset_input *req;
3374 if (!bnxt_hwrm_reset_permitted(bp)) {
3375 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
3379 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
3383 req->embedded_proc_type = proc_type;
3384 req->selfrst_status = self_reset;
3387 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
3388 rc = hwrm_req_send_silent(bp, req);
3390 rc = hwrm_req_send(bp, req);
3392 bnxt_print_admin_err(bp);
3397 static int bnxt_firmware_reset(struct net_device *dev,
3398 enum bnxt_nvm_directory_type dir_type)
3400 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
3401 u8 proc_type, flags = 0;
3403 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
3404 /* (e.g. when firmware isn't already running) */
3406 case BNX_DIR_TYPE_CHIMP_PATCH:
3407 case BNX_DIR_TYPE_BOOTCODE:
3408 case BNX_DIR_TYPE_BOOTCODE_2:
3409 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
3410 /* Self-reset ChiMP upon next PCIe reset: */
3411 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3413 case BNX_DIR_TYPE_APE_FW:
3414 case BNX_DIR_TYPE_APE_PATCH:
3415 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
3416 /* Self-reset APE upon next PCIe reset: */
3417 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3419 case BNX_DIR_TYPE_KONG_FW:
3420 case BNX_DIR_TYPE_KONG_PATCH:
3421 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
3423 case BNX_DIR_TYPE_BONO_FW:
3424 case BNX_DIR_TYPE_BONO_PATCH:
3425 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
3431 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
3434 static int bnxt_firmware_reset_chip(struct net_device *dev)
3436 struct bnxt *bp = netdev_priv(dev);
3439 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
3440 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
3442 return bnxt_hwrm_firmware_reset(dev,
3443 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
3444 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
3448 static int bnxt_firmware_reset_ap(struct net_device *dev)
3450 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
3451 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
3455 static int bnxt_flash_firmware(struct net_device *dev,
3464 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
3467 case BNX_DIR_TYPE_BOOTCODE:
3468 case BNX_DIR_TYPE_BOOTCODE_2:
3469 code_type = CODE_BOOT;
3471 case BNX_DIR_TYPE_CHIMP_PATCH:
3472 code_type = CODE_CHIMP_PATCH;
3474 case BNX_DIR_TYPE_APE_FW:
3475 code_type = CODE_MCTP_PASSTHRU;
3477 case BNX_DIR_TYPE_APE_PATCH:
3478 code_type = CODE_APE_PATCH;
3480 case BNX_DIR_TYPE_KONG_FW:
3481 code_type = CODE_KONG_FW;
3483 case BNX_DIR_TYPE_KONG_PATCH:
3484 code_type = CODE_KONG_PATCH;
3486 case BNX_DIR_TYPE_BONO_FW:
3487 code_type = CODE_BONO_FW;
3489 case BNX_DIR_TYPE_BONO_PATCH:
3490 code_type = CODE_BONO_PATCH;
3493 netdev_err(dev, "Unsupported directory entry type: %u\n",
3497 if (fw_size < sizeof(struct bnxt_fw_header)) {
3498 netdev_err(dev, "Invalid firmware file size: %u\n",
3499 (unsigned int)fw_size);
3502 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
3503 netdev_err(dev, "Invalid firmware signature: %08X\n",
3504 le32_to_cpu(header->signature));
3507 if (header->code_type != code_type) {
3508 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
3509 code_type, header->code_type);
3512 if (header->device != DEVICE_CUMULUS_FAMILY) {
3513 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
3514 DEVICE_CUMULUS_FAMILY, header->device);
3517 /* Confirm the CRC32 checksum of the file: */
3518 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
3519 sizeof(stored_crc)));
3520 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
3521 if (calculated_crc != stored_crc) {
3522 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
3523 (unsigned long)stored_crc,
3524 (unsigned long)calculated_crc);
3527 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3528 0, 0, 0, fw_data, fw_size);
3529 if (rc == 0) /* Firmware update successful */
3530 rc = bnxt_firmware_reset(dev, dir_type);
3535 static int bnxt_flash_microcode(struct net_device *dev,
3540 struct bnxt_ucode_trailer *trailer;
3545 if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
3546 netdev_err(dev, "Invalid microcode file size: %u\n",
3547 (unsigned int)fw_size);
3550 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
3552 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
3553 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
3554 le32_to_cpu(trailer->sig));
3557 if (le16_to_cpu(trailer->dir_type) != dir_type) {
3558 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
3559 dir_type, le16_to_cpu(trailer->dir_type));
3562 if (le16_to_cpu(trailer->trailer_length) <
3563 sizeof(struct bnxt_ucode_trailer)) {
3564 netdev_err(dev, "Invalid microcode trailer length: %d\n",
3565 le16_to_cpu(trailer->trailer_length));
3569 /* Confirm the CRC32 checksum of the file: */
3570 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
3571 sizeof(stored_crc)));
3572 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
3573 if (calculated_crc != stored_crc) {
3575 "CRC32 (%08lX) does not match calculated: %08lX\n",
3576 (unsigned long)stored_crc,
3577 (unsigned long)calculated_crc);
3580 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3581 0, 0, 0, fw_data, fw_size);
3586 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
3589 case BNX_DIR_TYPE_CHIMP_PATCH:
3590 case BNX_DIR_TYPE_BOOTCODE:
3591 case BNX_DIR_TYPE_BOOTCODE_2:
3592 case BNX_DIR_TYPE_APE_FW:
3593 case BNX_DIR_TYPE_APE_PATCH:
3594 case BNX_DIR_TYPE_KONG_FW:
3595 case BNX_DIR_TYPE_KONG_PATCH:
3596 case BNX_DIR_TYPE_BONO_FW:
3597 case BNX_DIR_TYPE_BONO_PATCH:
3604 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
3607 case BNX_DIR_TYPE_AVS:
3608 case BNX_DIR_TYPE_EXP_ROM_MBA:
3609 case BNX_DIR_TYPE_PCIE:
3610 case BNX_DIR_TYPE_TSCF_UCODE:
3611 case BNX_DIR_TYPE_EXT_PHY:
3612 case BNX_DIR_TYPE_CCM:
3613 case BNX_DIR_TYPE_ISCSI_BOOT:
3614 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3615 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3622 static bool bnxt_dir_type_is_executable(u16 dir_type)
3624 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3625 bnxt_dir_type_is_other_exec_format(dir_type);
3628 static int bnxt_flash_firmware_from_file(struct net_device *dev,
3630 const char *filename)
3632 const struct firmware *fw;
3635 rc = request_firmware(&fw, filename, &dev->dev);
3637 netdev_err(dev, "Error %d requesting firmware file: %s\n",
3641 if (bnxt_dir_type_is_ape_bin_format(dir_type))
3642 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
3643 else if (bnxt_dir_type_is_other_exec_format(dir_type))
3644 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
3646 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3647 0, 0, 0, fw->data, fw->size);
3648 release_firmware(fw);
/* User-visible error strings for NVM package installation failures,
 * reported via both extack and the kernel log (see BNXT_NVM_ERR_MSG).
 */
#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
#define MSG_INVALID_PKG "PKG install error : Invalid package"
#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
#define MSG_INVALID_DEV "PKG install error : Invalid device"
#define MSG_INTERNAL_ERR "PKG install error : Internal error"
#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3663 static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
3664 struct netlink_ext_ack *extack)
3667 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
3668 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
3669 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
3670 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
3671 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
3672 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
3673 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
3675 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
3676 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
3677 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
3678 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
3679 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
3680 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
3681 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
3682 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
3683 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
3684 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
3685 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
3686 case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
3687 case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
3688 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
3690 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
3691 BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
3693 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
3694 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
3695 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
3696 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
3697 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
3698 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
3701 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
/* DMA transfer parameters for package flashing: preferred slice size and
 * the NVM_MODIFY batch-mode flags that mark intermediate/final chunks.
 */
#define BNXT_PKG_DMA_SIZE	0x40000
#define BNXT_NVM_MORE_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG	(cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3710 static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
3711 struct netlink_ext_ack *extack)
3716 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3717 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
3720 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3724 if (fw_size > item_len) {
3725 rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
3726 BNX_DIR_ORDINAL_FIRST, 0, 1,
3727 round_up(fw_size, 4096), NULL, 0);
3729 BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
3736 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
3737 u32 install_type, struct netlink_ext_ack *extack)
3739 struct hwrm_nvm_install_update_input *install;
3740 struct hwrm_nvm_install_update_output *resp;
3741 struct hwrm_nvm_modify_input *modify;
3742 struct bnxt *bp = netdev_priv(dev);
3743 bool defrag_attempted = false;
3744 dma_addr_t dma_handle;
3752 /* resize before flashing larger image than available space */
3753 rc = bnxt_resize_update_entry(dev, fw->size, extack);
3757 bnxt_hwrm_fw_set_time(bp);
3759 rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
3763 /* Try allocating a large DMA buffer first. Older fw will
3764 * cause excessive NVRAM erases when using small blocks.
3766 modify_len = roundup_pow_of_two(fw->size);
3767 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
3769 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
3770 if (!kmem && modify_len > PAGE_SIZE)
3776 hwrm_req_drop(bp, modify);
3780 rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
3782 hwrm_req_drop(bp, modify);
3786 hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
3787 hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
3789 hwrm_req_hold(bp, modify);
3790 modify->host_src_addr = cpu_to_le64(dma_handle);
3792 resp = hwrm_req_hold(bp, install);
3793 if ((install_type & 0xffff) == 0)
3794 install_type >>= 16;
3795 install->install_type = cpu_to_le32(install_type);
3798 u32 copied = 0, len = modify_len;
3800 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3801 BNX_DIR_ORDINAL_FIRST,
3803 &index, &item_len, NULL);
3805 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3808 if (fw->size > item_len) {
3809 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
3814 modify->dir_idx = cpu_to_le16(index);
3816 if (fw->size > modify_len)
3817 modify->flags = BNXT_NVM_MORE_FLAG;
3818 while (copied < fw->size) {
3819 u32 balance = fw->size - copied;
3821 if (balance <= modify_len) {
3824 modify->flags |= BNXT_NVM_LAST_FLAG;
3826 memcpy(kmem, fw->data + copied, len);
3827 modify->len = cpu_to_le32(len);
3828 modify->offset = cpu_to_le32(copied);
3829 rc = hwrm_req_send(bp, modify);
3835 rc = hwrm_req_send_silent(bp, install);
3839 if (defrag_attempted) {
3840 /* We have tried to defragment already in the previous
3841 * iteration. Return with the result for INSTALL_UPDATE
3846 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
3849 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
3850 BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
3853 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
3855 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
3857 rc = hwrm_req_send_silent(bp, install);
3861 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
3863 if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
3864 /* FW has cleared NVM area, driver will create
3865 * UPDATE directory and try the flash again
3867 defrag_attempted = true;
3869 rc = bnxt_flash_nvram(bp->dev,
3870 BNX_DIR_TYPE_UPDATE,
3871 BNX_DIR_ORDINAL_FIRST,
3872 0, 0, item_len, NULL, 0);
3878 BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
3880 } while (defrag_attempted && !rc);
3883 hwrm_req_drop(bp, modify);
3884 hwrm_req_drop(bp, install);
3887 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
3888 (s8)resp->result, (int)resp->problem_item);
3889 rc = nvm_update_err_to_stderr(dev, resp->result, extack);
3892 bnxt_print_admin_err(bp);
3896 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
3897 u32 install_type, struct netlink_ext_ack *extack)
3899 const struct firmware *fw;
3902 rc = request_firmware(&fw, filename, &dev->dev);
3904 netdev_err(dev, "PKG error %d requesting file: %s\n",
3909 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
3911 release_firmware(fw);
3916 static int bnxt_flash_device(struct net_device *dev,
3917 struct ethtool_flash *flash)
3919 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
3920 netdev_err(dev, "flashdev not supported from a virtual function\n");
3924 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
3925 flash->region > 0xffff)
3926 return bnxt_flash_package_from_file(dev, flash->data,
3927 flash->region, NULL);
3929 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
3932 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
3934 struct hwrm_nvm_get_dir_info_output *output;
3935 struct hwrm_nvm_get_dir_info_input *req;
3936 struct bnxt *bp = netdev_priv(dev);
3939 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
3943 output = hwrm_req_hold(bp, req);
3944 rc = hwrm_req_send(bp, req);
3946 *entries = le32_to_cpu(output->entries);
3947 *length = le32_to_cpu(output->entry_length);
3949 hwrm_req_drop(bp, req);
/* ethtool eeprom length callback; not supported on VFs. */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
3966 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
3968 struct bnxt *bp = netdev_priv(dev);
3974 dma_addr_t dma_handle;
3975 struct hwrm_nvm_get_dir_entries_input *req;
3977 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
3981 if (!dir_entries || !entry_length)
3984 /* Insert 2 bytes of directory info (count and size of entries) */
3988 *data++ = dir_entries;
3989 *data++ = entry_length;
3991 memset(data, 0xff, len);
3993 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
3997 buflen = mul_u32_u32(dir_entries, entry_length);
3998 buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
4000 hwrm_req_drop(bp, req);
4003 req->host_dest_addr = cpu_to_le64(dma_handle);
4005 hwrm_req_hold(bp, req); /* hold the slice */
4006 rc = hwrm_req_send(bp, req);
4008 memcpy(data, buf, len > buflen ? buflen : len);
4009 hwrm_req_drop(bp, req);
4013 int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
4014 u32 length, u8 *data)
4016 struct bnxt *bp = netdev_priv(dev);
4019 dma_addr_t dma_handle;
4020 struct hwrm_nvm_read_input *req;
4025 rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
4029 buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
4031 hwrm_req_drop(bp, req);
4035 req->host_dest_addr = cpu_to_le64(dma_handle);
4036 req->dir_idx = cpu_to_le16(index);
4037 req->offset = cpu_to_le32(offset);
4038 req->len = cpu_to_le32(length);
4040 hwrm_req_hold(bp, req); /* hold the slice */
4041 rc = hwrm_req_send(bp, req);
4043 memcpy(data, buf, length);
4044 hwrm_req_drop(bp, req);
4048 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
4049 u16 ext, u16 *index, u32 *item_length,
4052 struct hwrm_nvm_find_dir_entry_output *output;
4053 struct hwrm_nvm_find_dir_entry_input *req;
4054 struct bnxt *bp = netdev_priv(dev);
4057 rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
4063 req->dir_type = cpu_to_le16(type);
4064 req->dir_ordinal = cpu_to_le16(ordinal);
4065 req->dir_ext = cpu_to_le16(ext);
4066 req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
4067 output = hwrm_req_hold(bp, req);
4068 rc = hwrm_req_send_silent(bp, req);
4071 *index = le16_to_cpu(output->dir_idx);
4073 *item_length = le32_to_cpu(output->dir_item_length);
4075 *data_length = le32_to_cpu(output->dir_data_length);
4077 hwrm_req_drop(bp, req);
4081 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
4083 char *retval = NULL;
4090 /* null-terminate the log data (removing last '\n'): */
4091 data[datalen - 1] = 0;
4092 for (p = data; *p != 0; p++) {
4095 while (*p != 0 && *p != '\n') {
4097 while (*p != 0 && *p != '\t' && *p != '\n')
4099 if (field == desired_field)
4114 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
4116 struct bnxt *bp = netdev_priv(dev);
4123 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
4124 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
4125 &index, NULL, &pkglen);
4129 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
4131 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
4136 rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
4140 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
4142 if (pkgver && *pkgver != 0 && isdigit(*pkgver))
4143 strscpy(ver, pkgver, size);
4153 static void bnxt_get_pkgver(struct net_device *dev)
4155 struct bnxt *bp = netdev_priv(dev);
4156 char buf[FW_VER_STR_LEN];
4159 if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
4160 len = strlen(bp->fw_ver_str);
4161 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
4166 static int bnxt_get_eeprom(struct net_device *dev,
4167 struct ethtool_eeprom *eeprom,
4173 if (eeprom->offset == 0) /* special offset value to get directory */
4174 return bnxt_get_nvram_directory(dev, eeprom->len, data);
4176 index = eeprom->offset >> 24;
4177 offset = eeprom->offset & 0xffffff;
4180 netdev_err(dev, "unsupported index value: %d\n", index);
4184 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
4187 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
4189 struct hwrm_nvm_erase_dir_entry_input *req;
4190 struct bnxt *bp = netdev_priv(dev);
4193 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
4197 req->dir_idx = cpu_to_le16(index);
4198 return hwrm_req_send(bp, req);
4201 static int bnxt_set_eeprom(struct net_device *dev,
4202 struct ethtool_eeprom *eeprom,
4205 struct bnxt *bp = netdev_priv(dev);
4207 u16 type, ext, ordinal, attr;
4210 netdev_err(dev, "NVM write not supported from a virtual function\n");
4214 type = eeprom->magic >> 16;
4216 if (type == 0xffff) { /* special value for directory operations */
4217 index = eeprom->magic & 0xff;
4218 dir_op = eeprom->magic >> 8;
4222 case 0x0e: /* erase */
4223 if (eeprom->offset != ~eeprom->magic)
4225 return bnxt_erase_nvram_directory(dev, index - 1);
4231 /* Create or re-write an NVM item: */
4232 if (bnxt_dir_type_is_executable(type))
4234 ext = eeprom->magic & 0xffff;
4235 ordinal = eeprom->offset >> 16;
4236 attr = eeprom->offset & 0xffff;
4238 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
4242 static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
4244 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
4245 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
4246 struct bnxt *bp = netdev_priv(dev);
4247 struct ethtool_keee *eee = &bp->eee;
4248 struct bnxt_link_info *link_info = &bp->link_info;
4251 if (!BNXT_PHY_CFG_ABLE(bp))
4254 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
4257 mutex_lock(&bp->link_lock);
4258 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
4259 if (!edata->eee_enabled)
4262 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4263 netdev_warn(dev, "EEE requires autoneg\n");
4267 if (edata->tx_lpi_enabled) {
4268 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
4269 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
4270 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
4271 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
4274 } else if (!bp->lpi_tmr_hi) {
4275 edata->tx_lpi_timer = eee->tx_lpi_timer;
4278 if (linkmode_empty(edata->advertised)) {
4279 linkmode_and(edata->advertised, advertising, eee->supported);
4280 } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
4281 netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
4286 linkmode_copy(eee->advertised, edata->advertised);
4287 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
4288 eee->tx_lpi_timer = edata->tx_lpi_timer;
4290 eee->eee_enabled = edata->eee_enabled;
4292 if (netif_running(dev))
4293 rc = bnxt_hwrm_set_link_setting(bp, false, true);
4296 mutex_unlock(&bp->link_lock);
4300 static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
4302 struct bnxt *bp = netdev_priv(dev);
4304 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
4308 if (!bp->eee.eee_enabled) {
4309 /* Preserve tx_lpi_timer so that the last value will be used
4310 * by default when it is re-enabled.
4312 linkmode_zero(edata->advertised);
4313 edata->tx_lpi_enabled = 0;
4316 if (!bp->eee.eee_active)
4317 linkmode_zero(edata->lp_advertised);
4322 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
4323 u16 page_number, u8 bank,
4324 u16 start_addr, u16 data_length,
4327 struct hwrm_port_phy_i2c_read_output *output;
4328 struct hwrm_port_phy_i2c_read_input *req;
4329 int rc, byte_offset = 0;
4331 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
4335 output = hwrm_req_hold(bp, req);
4336 req->i2c_slave_addr = i2c_addr;
4337 req->page_number = cpu_to_le16(page_number);
4338 req->port_id = cpu_to_le16(bp->pf.port_id);
4342 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
4343 data_length -= xfer_size;
4344 req->page_offset = cpu_to_le16(start_addr + byte_offset);
4345 req->data_length = xfer_size;
4347 cpu_to_le32((start_addr + byte_offset ?
4348 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
4351 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
4353 rc = hwrm_req_send(bp, req);
4355 memcpy(buf + byte_offset, output->data, xfer_size);
4356 byte_offset += xfer_size;
4357 } while (!rc && data_length > 0);
4358 hwrm_req_drop(bp, req);
4363 static int bnxt_get_module_info(struct net_device *dev,
4364 struct ethtool_modinfo *modinfo)
4366 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
4367 struct bnxt *bp = netdev_priv(dev);
4370 /* No point in going further if phy status indicates
4371 * module is not inserted or if it is powered down or
4372 * if it is of type 10GBase-T
4374 if (bp->link_info.module_status >
4375 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4378 /* This feature is not supported in older firmware versions */
4379 if (bp->hwrm_spec_code < 0x10202)
4382 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
4383 SFF_DIAG_SUPPORT_OFFSET + 1,
4386 u8 module_id = data[0];
4387 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
4389 switch (module_id) {
4390 case SFF_MODULE_ID_SFP:
4391 modinfo->type = ETH_MODULE_SFF_8472;
4392 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4393 if (!diag_supported)
4394 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4396 case SFF_MODULE_ID_QSFP:
4397 case SFF_MODULE_ID_QSFP_PLUS:
4398 modinfo->type = ETH_MODULE_SFF_8436;
4399 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4401 case SFF_MODULE_ID_QSFP28:
4402 modinfo->type = ETH_MODULE_SFF_8636;
4403 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
4413 static int bnxt_get_module_eeprom(struct net_device *dev,
4414 struct ethtool_eeprom *eeprom,
4417 struct bnxt *bp = netdev_priv(dev);
4418 u16 start = eeprom->offset, length = eeprom->len;
4421 memset(data, 0, eeprom->len);
4423 /* Read A0 portion of the EEPROM */
4424 if (start < ETH_MODULE_SFF_8436_LEN) {
4425 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
4426 length = ETH_MODULE_SFF_8436_LEN - start;
4427 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
4428 start, length, data);
4433 length = eeprom->len - length;
4436 /* Read A2 portion of the EEPROM */
4438 start -= ETH_MODULE_SFF_8436_LEN;
4439 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
4440 start, length, data);
4445 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
4447 if (bp->link_info.module_status <=
4448 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4451 switch (bp->link_info.module_status) {
4452 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
4453 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
4455 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
4456 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
4458 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
4459 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
4462 NL_SET_ERR_MSG_MOD(extack, "Unknown error");
4468 static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
4469 const struct ethtool_module_eeprom *page_data,
4470 struct netlink_ext_ack *extack)
4472 struct bnxt *bp = netdev_priv(dev);
4475 rc = bnxt_get_module_status(bp, extack);
4479 if (bp->hwrm_spec_code < 0x10202) {
4480 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
4484 if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
4485 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
4489 rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
4490 page_data->page, page_data->bank,
4495 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
4498 return page_data->length;
4501 static int bnxt_nway_reset(struct net_device *dev)
4505 struct bnxt *bp = netdev_priv(dev);
4506 struct bnxt_link_info *link_info = &bp->link_info;
4508 if (!BNXT_PHY_CFG_ABLE(bp))
4511 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
4514 if (netif_running(dev))
4515 rc = bnxt_hwrm_set_link_setting(bp, true, false);
4520 static int bnxt_set_phys_id(struct net_device *dev,
4521 enum ethtool_phys_id_state state)
4523 struct hwrm_port_led_cfg_input *req;
4524 struct bnxt *bp = netdev_priv(dev);
4525 struct bnxt_pf_info *pf = &bp->pf;
4526 struct bnxt_led_cfg *led_cfg;
4531 if (!bp->num_leds || BNXT_VF(bp))
4534 if (state == ETHTOOL_ID_ACTIVE) {
4535 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
4536 duration = cpu_to_le16(500);
4537 } else if (state == ETHTOOL_ID_INACTIVE) {
4538 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
4539 duration = cpu_to_le16(0);
4543 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
4547 req->port_id = cpu_to_le16(pf->port_id);
4548 req->num_leds = bp->num_leds;
4549 led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
4550 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
4551 req->enables |= BNXT_LED_DFLT_ENABLES(i);
4552 led_cfg->led_id = bp->leds[i].led_id;
4553 led_cfg->led_state = led_state;
4554 led_cfg->led_blink_on = duration;
4555 led_cfg->led_blink_off = duration;
4556 led_cfg->led_group_id = bp->leds[i].led_group_id;
4558 return hwrm_req_send(bp, req);
4561 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
4563 struct hwrm_selftest_irq_input *req;
4566 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
4570 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4571 return hwrm_req_send(bp, req);
4574 static int bnxt_test_irq(struct bnxt *bp)
4578 for (i = 0; i < bp->cp_nr_rings; i++) {
4579 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
4582 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
4589 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
4591 struct hwrm_port_mac_cfg_input *req;
4594 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
4598 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
4600 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
4602 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
4603 return hwrm_req_send(bp, req);
4606 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
4608 struct hwrm_port_phy_qcaps_output *resp;
4609 struct hwrm_port_phy_qcaps_input *req;
4612 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
4616 resp = hwrm_req_hold(bp, req);
4617 rc = hwrm_req_send(bp, req);
4619 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
4621 hwrm_req_drop(bp, req);
4625 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
4626 struct hwrm_port_phy_cfg_input *req)
4628 struct bnxt_link_info *link_info = &bp->link_info;
4633 if (!link_info->autoneg ||
4634 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
4637 rc = bnxt_query_force_speeds(bp, &fw_advertising);
4641 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
4642 if (BNXT_LINK_IS_UP(bp))
4643 fw_speed = bp->link_info.link_speed;
4644 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
4645 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
4646 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
4647 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
4648 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
4649 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
4650 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
4651 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
4653 req->force_link_speed = cpu_to_le16(fw_speed);
4654 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
4655 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4656 rc = hwrm_req_send(bp, req);
4658 req->force_link_speed = cpu_to_le16(0);
4662 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
4664 struct hwrm_port_phy_cfg_input *req;
4667 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
4671 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
4672 hwrm_req_hold(bp, req);
4675 bnxt_disable_an_for_lpbk(bp, req);
4677 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
4679 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
4681 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
4683 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
4684 rc = hwrm_req_send(bp, req);
4685 hwrm_req_drop(bp, req);
4689 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4690 u32 raw_cons, int pkt_size)
4692 struct bnxt_napi *bnapi = cpr->bnapi;
4693 struct bnxt_rx_ring_info *rxr;
4694 struct bnxt_sw_rx_bd *rx_buf;
4695 struct rx_cmp *rxcmp;
4701 rxr = bnapi->rx_ring;
4702 cp_cons = RING_CMP(raw_cons);
4703 rxcmp = (struct rx_cmp *)
4704 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
4705 cons = rxcmp->rx_cmp_opaque;
4706 rx_buf = &rxr->rx_buf_ring[cons];
4707 data = rx_buf->data_ptr;
4708 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
4709 if (len != pkt_size)
4712 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
4715 for ( ; i < pkt_size; i++) {
4716 if (data[i] != (u8)(i & 0xff))
4722 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4725 struct tx_cmp *txcmp;
4731 raw_cons = cpr->cp_raw_cons;
4732 for (i = 0; i < 200; i++) {
4733 cons = RING_CMP(raw_cons);
4734 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
4736 if (!TX_CMP_VALID(txcmp, raw_cons)) {
4741 /* The valid test of the entry must be done first before
4742 * reading any further.
4745 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
4746 TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
4747 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
4748 raw_cons = NEXT_RAW_CMP(raw_cons);
4749 raw_cons = NEXT_RAW_CMP(raw_cons);
4752 raw_cons = NEXT_RAW_CMP(raw_cons);
4754 cpr->cp_raw_cons = raw_cons;
4758 static int bnxt_run_loopback(struct bnxt *bp)
4760 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
4761 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4762 struct bnxt_cp_ring_info *cpr;
4763 int pkt_size, i = 0;
4764 struct sk_buff *skb;
4769 cpr = &rxr->bnapi->cp_ring;
4770 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4772 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
4773 skb = netdev_alloc_skb(bp->dev, pkt_size);
4776 data = skb_put(skb, pkt_size);
4777 ether_addr_copy(&data[i], bp->dev->dev_addr);
4779 ether_addr_copy(&data[i], bp->dev->dev_addr);
4781 for ( ; i < pkt_size; i++)
4782 data[i] = (u8)(i & 0xff);
4784 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
4786 if (dma_mapping_error(&bp->pdev->dev, map)) {
4790 bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
4792 /* Sync BD data before updating doorbell */
4795 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
4796 rc = bnxt_poll_loopback(bp, cpr, pkt_size);
4798 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
4803 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
4805 struct hwrm_selftest_exec_output *resp;
4806 struct hwrm_selftest_exec_input *req;
4809 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
4813 hwrm_req_timeout(bp, req, bp->test_info->timeout);
4814 req->flags = test_mask;
4816 resp = hwrm_req_hold(bp, req);
4817 rc = hwrm_req_send(bp, req);
4818 *test_results = resp->test_success;
4819 hwrm_req_drop(bp, req);
/* The last BNXT_DRV_TESTS entries in the self-test result array are
 * driver-run tests (loopbacks and IRQ); the preceding entries are
 * firmware-run tests.
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX	(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 3)
/* ethtool .self_test handler.  Runs firmware self-tests (online ones
 * with the NIC up; offline ones after closing it) plus the driver's
 * own loopback and IRQ tests.  Per-test results go into @buf (non-zero
 * = failed) and overall failure is flagged in etest->flags.
 *
 * NOTE(review): this extract elides numerous lines (early returns,
 * closing braces, the half-open error path, test_mask declaration) —
 * comments describe only the visible statements.
 */
4829 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
4832 struct bnxt *bp = netdev_priv(dev);
4833 bool do_ext_lpbk = false;
4834 bool offline = false;
4835 u8 test_results = 0;
/* Self-test requires firmware-advertised tests and PF context. */
4839 if (!bp->num_tests || !BNXT_PF(bp))
/* Offline tests conflict with a loaded RoCE (ULP) driver. */
4842 if (etest->flags & ETH_TEST_FL_OFFLINE &&
4843 bnxt_ulp_registered(bp->edev)) {
4844 etest->flags |= ETH_TEST_FL_FAILED;
4845 netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
4849 memset(buf, 0, sizeof(u64) * bp->num_tests);
4850 if (!netif_running(dev)) {
4851 etest->flags |= ETH_TEST_FL_FAILED;
/* External loopback only if the PHY advertises support for it. */
4855 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
4856 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
4859 if (etest->flags & ETH_TEST_FL_OFFLINE) {
/* Offline tests are disruptive: forbid with active VFs / shared PF. */
4860 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
4861 etest->flags |= ETH_TEST_FL_FAILED;
4862 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
/* Build the firmware test mask from the online/offline split. */
4868 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4869 u8 bit_val = 1 << i;
4871 if (!(bp->test_info->offline_mask & bit_val))
4872 test_mask |= bit_val;
4874 test_mask |= bit_val;
/* Online path: run firmware tests with the NIC still up. */
4877 bnxt_run_fw_tests(bp, test_mask, &test_results);
/* Offline path: close the NIC, run firmware tests, then loopbacks. */
4879 bnxt_close_nic(bp, true, false);
4880 bnxt_run_fw_tests(bp, test_mask, &test_results);
/* Pre-mark MAC loopback failed; cleared below on success. */
4882 buf[BNXT_MACLPBK_TEST_IDX] = 1;
4883 bnxt_hwrm_mac_loopback(bp, true);
/* Half-open: rings are set up but no traffic from the stack. */
4885 rc = bnxt_half_open_nic(bp);
4887 bnxt_hwrm_mac_loopback(bp, false);
4888 etest->flags |= ETH_TEST_FL_FAILED;
4891 if (bnxt_run_loopback(bp))
4892 etest->flags |= ETH_TEST_FL_FAILED;
4894 buf[BNXT_MACLPBK_TEST_IDX] = 0;
/* Switch from MAC loopback to internal PHY loopback. */
4896 bnxt_hwrm_mac_loopback(bp, false);
4897 bnxt_hwrm_phy_loopback(bp, true, false);
4899 if (bnxt_run_loopback(bp)) {
4900 buf[BNXT_PHYLPBK_TEST_IDX] = 1;
4901 etest->flags |= ETH_TEST_FL_FAILED;
/* External loopback (requires loopback plug fitted by the user). */
4904 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
4905 bnxt_hwrm_phy_loopback(bp, true, true);
4907 if (bnxt_run_loopback(bp)) {
4908 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
4909 etest->flags |= ETH_TEST_FL_FAILED;
/* Restore normal operation. */
4912 bnxt_hwrm_phy_loopback(bp, false, false);
4913 bnxt_half_close_nic(bp);
4914 rc = bnxt_open_nic(bp, true, true);
/* IRQ test runs last, after the NIC is fully reopened. */
4916 if (rc || bnxt_test_irq(bp)) {
4917 buf[BNXT_IRQ_TEST_IDX] = 1;
4918 etest->flags |= ETH_TEST_FL_FAILED;
/* Translate firmware per-test success bits into buf[] results. */
4920 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4921 u8 bit_val = 1 << i;
4923 if ((test_mask & bit_val) && !(test_results & bit_val)) {
4925 etest->flags |= ETH_TEST_FL_FAILED;
/* ethtool .reset handler.  Supports a full chip reset (firmware reset)
 * and an application-processor reset; each is gated on firmware spec
 * version 0x10803+.  Bits for completed resets are cleared from *flags.
 *
 * NOTE(review): this extract elides several lines (the @req
 * declaration/derivation from *flags, "reload = true" assignments,
 * early returns, closing braces and the final return).
 */
4930 static int bnxt_reset(struct net_device *dev, u32 *flags)
4932 struct bnxt *bp = netdev_priv(dev);
4933 bool reload = false;
/* Resets may only be requested from the PF (elided BNXT_PF check). */
4940 netdev_err(dev, "Reset is not supported from a VF\n");
/* Without hot-reset capability, a reset would break assigned VFs. */
4944 if (pci_vfs_assigned(bp->pdev) &&
4945 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
4947 "Reset not allowed when VFs are assigned to VMs\n");
4951 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
4952 /* This feature is not supported in older firmware versions */
4953 if (bp->hwrm_spec_code >= 0x10803) {
4954 if (!bnxt_firmware_reset_chip(dev)) {
4955 netdev_info(dev, "Firmware reset request successful.\n");
/* Without hot reset, the driver must be reloaded to finish. */
4956 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
4958 *flags &= ~BNXT_FW_RESET_CHIP;
4960 } else if (req == BNXT_FW_RESET_CHIP) {
4961 return -EOPNOTSUPP; /* only request, fail hard */
/* AP reset is only meaningful on pre-P4 chips here. */
4965 if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
4966 /* This feature is not supported in older firmware versions */
4967 if (bp->hwrm_spec_code >= 0x10803) {
4968 if (!bnxt_firmware_reset_ap(dev)) {
4969 netdev_info(dev, "Reset application processor successful.\n");
4971 *flags &= ~BNXT_FW_RESET_AP;
4973 } else if (req == BNXT_FW_RESET_AP) {
4974 return -EOPNOTSUPP; /* only request, fail hard */
/* Printed when a reload is required to complete the reset (elided
 * "if (reload)" guard).
 */
4979 netdev_info(dev, "Reload driver to complete reset\n");
/* ethtool .set_dump handler.  Accepts only BNXT_DUMP_LIVE (0) or
 * BNXT_DUMP_CRASH (1); crash dumps additionally require firmware
 * support for host- or SoC-DDR crash dump collection.
 *
 * NOTE(review): early returns and closing braces are elided in this
 * extract.
 */
4984 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
4986 struct bnxt *bp = netdev_priv(dev);
4988 if (dump->flag > BNXT_DUMP_CRASH) {
4989 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
4993 if (dump->flag == BNXT_DUMP_CRASH) {
/* SoC-DDR crash dump needs the TEE firmware interface built in. */
4994 if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
4995 (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
4997 "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
4999 } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
5000 netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
/* Remember the requested dump type for get_dump_flag/get_dump_data. */
5005 bp->dump_flag = dump->flag;
/* ethtool .get_dump_flag handler.  Reports the configured dump type,
 * its expected length, and a version built from the firmware's
 * maj/min/bld/rsvd bytes.  Requires HWRM spec 0x10801+.
 */
5009 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
5011 struct bnxt *bp = netdev_priv(dev);
5013 if (bp->hwrm_spec_code < 0x10801)
/* Pack the 4 firmware version bytes into one 32-bit version word. */
5016 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
5017 bp->ver_resp.hwrm_fw_min_8b << 16 |
5018 bp->ver_resp.hwrm_fw_bld_8b << 8 |
5019 bp->ver_resp.hwrm_fw_rsvd_8b;
5021 dump->flag = bp->dump_flag;
5022 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
/* ethtool .get_dump_data handler.  Zeroes the caller's buffer and
 * fills it with the coredump of the previously configured type.
 * Requires HWRM spec 0x10801+.
 */
5026 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
5029 struct bnxt *bp = netdev_priv(dev);
5031 if (bp->hwrm_spec_code < 0x10801)
5034 memset(buf, 0, dump->len);
5036 dump->flag = bp->dump_flag;
5037 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
/* ethtool .get_ts_info handler.  Always reports software TX
 * timestamping; when a PTP clock is configured (elided ptp/NULL check)
 * it additionally reports hardware timestamping, the PHC index and the
 * supported TX types and RX filters.
 */
5040 static int bnxt_get_ts_info(struct net_device *dev,
5041 struct kernel_ethtool_ts_info *info)
5043 struct bnxt *bp = netdev_priv(dev);
5044 struct bnxt_ptp_cfg *ptp;
5047 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
5052 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
5053 SOF_TIMESTAMPING_RX_HARDWARE |
5054 SOF_TIMESTAMPING_RAW_HARDWARE;
5056 info->phc_index = ptp_clock_index(ptp->ptp_clock);
5058 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
/* PTPv2 event filtering over L2 and L4 is always available with PTP. */
5060 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5061 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5062 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* Newer firmware can timestamp all RX packets, not just PTP events. */
5064 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
5065 info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
/* One-time ethtool setup at probe.  Reads the package version when
 * firmware does not report it, then (PF with HWRM 0x10704+) queries the
 * firmware self-test list via HWRM_SELFTEST_QLIST and builds
 * bp->test_info: the offline mask, the test timeout, and the
 * user-visible test name strings (firmware tests plus the four
 * driver-defined loopback/IRQ tests).
 *
 * NOTE(review): early returns, some closing braces and declarations
 * are elided in this extract.
 */
5069 void bnxt_ethtool_init(struct bnxt *bp)
5071 struct hwrm_selftest_qlist_output *resp;
5072 struct hwrm_selftest_qlist_input *req;
5073 struct bnxt_test_info *test_info;
5074 struct net_device *dev = bp->dev;
/* Firmware that reports the package version makes this query moot. */
5077 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
5078 bnxt_get_pkgver(dev);
/* Self-tests require newer firmware and PF context. */
5081 if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
/* Allocate test_info once; reuse on subsequent calls. */
5084 test_info = bp->test_info;
5086 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
5089 bp->test_info = test_info;
5092 if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
5095 resp = hwrm_req_hold(bp, req);
5096 rc = hwrm_req_send_silent(bp, req);
5098 goto ethtool_init_exit;
/* Firmware tests + the 4 driver tests, capped at BNXT_MAX_TEST. */
5100 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
5101 if (bp->num_tests > BNXT_MAX_TEST)
5102 bp->num_tests = BNXT_MAX_TEST;
5104 test_info->offline_mask = resp->offline_tests;
5105 test_info->timeout = le16_to_cpu(resp->test_timeout);
5106 if (!test_info->timeout)
5107 test_info->timeout = HWRM_CMD_TIMEOUT;
/* Build the ETH_SS_TEST string for each test slot. */
5108 for (i = 0; i < bp->num_tests; i++) {
5109 char *str = test_info->string[i];
5110 char *fw_str = resp->test_name[i];
5112 if (i == BNXT_MACLPBK_TEST_IDX) {
5113 strcpy(str, "Mac loopback test (offline)");
5114 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
5115 strcpy(str, "Phy loopback test (offline)");
5116 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
5117 strcpy(str, "Ext loopback test (offline)");
5118 } else if (i == BNXT_IRQ_TEST_IDX) {
5119 strcpy(str, "Interrupt_test (offline)");
/* Firmware-named test: annotate with its online/offline class. */
5121 snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
5122 fw_str, test_info->offline_mask & (1 << i) ?
5123 "offline" : "online");
5128 hwrm_req_drop(bp, req);
/* ethtool standard PHY statistics (IEEE 802.3).  Sourced from the
 * extended RX port stats; PF only, and only when extended port stats
 * are enabled.
 */
5131 static void bnxt_get_eth_phy_stats(struct net_device *dev,
5132 struct ethtool_eth_phy_stats *phy_stats)
5134 struct bnxt *bp = netdev_priv(dev);
5137 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
5140 rx = bp->rx_port_stats_ext.sw_stats;
5141 phy_stats->SymbolErrorDuringCarrier =
5142 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
/* ethtool standard MAC statistics (IEEE 802.3).  Sourced from the
 * port stats block; PF only, and only when port stats are enabled.
 */
5145 static void bnxt_get_eth_mac_stats(struct net_device *dev,
5146 struct ethtool_eth_mac_stats *mac_stats)
5148 struct bnxt *bp = netdev_priv(dev);
5151 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
/* RX counters sit at the start; TX counters at a fixed byte offset. */
5154 rx = bp->port_stats.sw_stats;
5155 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5157 mac_stats->FramesReceivedOK =
5158 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
5159 mac_stats->FramesTransmittedOK =
5160 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
5161 mac_stats->FrameCheckSequenceErrors =
5162 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
5163 mac_stats->AlignmentErrors =
5164 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
5165 mac_stats->OutOfRangeLengthField =
5166 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
/* ethtool standard MAC-control statistics.  Reports received MAC
 * control frames from the port stats block; PF only, and only when
 * port stats are enabled.
 */
5169 static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
5170 struct ethtool_eth_ctrl_stats *ctrl_stats)
5172 struct bnxt *bp = netdev_priv(dev);
5175 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
5178 rx = bp->port_stats.sw_stats;
5179 ctrl_stats->MACControlFramesReceived =
5180 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
5183 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
/* ethtool RMON statistics.  Fills jabber/oversize/undersize counters
 * and the 10-bucket RX/TX frame-size histograms from the port stats
 * block, and publishes the bucket boundary table (bnxt_rmon_ranges).
 * PF only, and only when port stats are enabled.
 */
5197 static void bnxt_get_rmon_stats(struct net_device *dev,
5198 struct ethtool_rmon_stats *rmon_stats,
5199 const struct ethtool_rmon_hist_range **ranges)
5201 struct bnxt *bp = netdev_priv(dev);
5204 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
/* RX counters sit at the start; TX counters at a fixed byte offset. */
5207 rx = bp->port_stats.sw_stats;
5208 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5210 rmon_stats->jabbers =
5211 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
5212 rmon_stats->oversize_pkts =
5213 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
5214 rmon_stats->undersize_pkts =
5215 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
/* RX frame-size histogram: bucket i matches bnxt_rmon_ranges[i]. */
5217 rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
5218 rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
5219 rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
5220 rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
5221 rmon_stats->hist[4] =
5222 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
5223 rmon_stats->hist[5] =
5224 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
5225 rmon_stats->hist[6] =
5226 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
5227 rmon_stats->hist[7] =
5228 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
5229 rmon_stats->hist[8] =
5230 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
5231 rmon_stats->hist[9] =
5232 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
/* TX frame-size histogram, same bucket layout as RX. */
5234 rmon_stats->hist_tx[0] =
5235 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
5236 rmon_stats->hist_tx[1] =
5237 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
5238 rmon_stats->hist_tx[2] =
5239 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
5240 rmon_stats->hist_tx[3] =
5241 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
5242 rmon_stats->hist_tx[4] =
5243 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
5244 rmon_stats->hist_tx[5] =
5245 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
5246 rmon_stats->hist_tx[6] =
5247 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
5248 rmon_stats->hist_tx[7] =
5249 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
5250 rmon_stats->hist_tx[8] =
5251 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
5252 rmon_stats->hist_tx[9] =
5253 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
5255 *ranges = bnxt_rmon_ranges;
/* ethtool .get_ts_stats handler.  Reports hardware timestamping
 * counters (packets timestamped, timestamps lost, errors) from the
 * PTP config.  NOTE(review): the ptp NULL guard is elided in this
 * extract — the visible dereferences presumably sit under it.
 */
5258 static void bnxt_get_ptp_stats(struct net_device *dev,
5259 struct ethtool_ts_stats *ts_stats)
5261 struct bnxt *bp = netdev_priv(dev);
5262 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5265 ts_stats->pkts = ptp->stats.ts_pkts;
5266 ts_stats->lost = ptp->stats.ts_lost;
/* Error counter is updated concurrently, hence the atomic read. */
5267 ts_stats->err = atomic64_read(&ptp->stats.ts_err);
/* ethtool extended link statistics.  Reports the link-down event count
 * from the extended RX port stats; PF only, and only when extended
 * port stats are enabled.
 */
5271 static void bnxt_get_link_ext_stats(struct net_device *dev,
5272 struct ethtool_link_ext_stats *stats)
5274 struct bnxt *bp = netdev_priv(dev);
5277 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
5280 rx = bp->rx_port_stats_ext.sw_stats;
5281 stats->link_down_events =
5282 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
/* Free the self-test info allocated by bnxt_ethtool_init() and clear
 * the pointer so a later init can reallocate it safely.
 */
5285 void bnxt_ethtool_free(struct bnxt *bp)
5287 kfree(bp->test_info);
5288 bp->test_info = NULL;
5291 const struct ethtool_ops bnxt_ethtool_ops = {
5292 .cap_link_lanes_supported = 1,
5293 .rxfh_per_ctx_key = 1,
5294 .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
5295 .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
5296 .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
5297 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5298 ETHTOOL_COALESCE_MAX_FRAMES |
5299 ETHTOOL_COALESCE_USECS_IRQ |
5300 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
5301 ETHTOOL_COALESCE_STATS_BLOCK_USECS |
5302 ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
5303 ETHTOOL_COALESCE_USE_CQE,
5304 .get_link_ksettings = bnxt_get_link_ksettings,
5305 .set_link_ksettings = bnxt_set_link_ksettings,
5306 .get_fec_stats = bnxt_get_fec_stats,
5307 .get_fecparam = bnxt_get_fecparam,
5308 .set_fecparam = bnxt_set_fecparam,
5309 .get_pause_stats = bnxt_get_pause_stats,
5310 .get_pauseparam = bnxt_get_pauseparam,
5311 .set_pauseparam = bnxt_set_pauseparam,
5312 .get_drvinfo = bnxt_get_drvinfo,
5313 .get_regs_len = bnxt_get_regs_len,
5314 .get_regs = bnxt_get_regs,
5315 .get_wol = bnxt_get_wol,
5316 .set_wol = bnxt_set_wol,
5317 .get_coalesce = bnxt_get_coalesce,
5318 .set_coalesce = bnxt_set_coalesce,
5319 .get_msglevel = bnxt_get_msglevel,
5320 .set_msglevel = bnxt_set_msglevel,
5321 .get_sset_count = bnxt_get_sset_count,
5322 .get_strings = bnxt_get_strings,
5323 .get_ethtool_stats = bnxt_get_ethtool_stats,
5324 .set_ringparam = bnxt_set_ringparam,
5325 .get_ringparam = bnxt_get_ringparam,
5326 .get_channels = bnxt_get_channels,
5327 .set_channels = bnxt_set_channels,
5328 .get_rxnfc = bnxt_get_rxnfc,
5329 .set_rxnfc = bnxt_set_rxnfc,
5330 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
5331 .get_rxfh_key_size = bnxt_get_rxfh_key_size,
5332 .get_rxfh = bnxt_get_rxfh,
5333 .set_rxfh = bnxt_set_rxfh,
5334 .create_rxfh_context = bnxt_create_rxfh_context,
5335 .modify_rxfh_context = bnxt_modify_rxfh_context,
5336 .remove_rxfh_context = bnxt_remove_rxfh_context,
5337 .flash_device = bnxt_flash_device,
5338 .get_eeprom_len = bnxt_get_eeprom_len,
5339 .get_eeprom = bnxt_get_eeprom,
5340 .set_eeprom = bnxt_set_eeprom,
5341 .get_link = bnxt_get_link,
5342 .get_link_ext_stats = bnxt_get_link_ext_stats,
5343 .get_eee = bnxt_get_eee,
5344 .set_eee = bnxt_set_eee,
5345 .get_module_info = bnxt_get_module_info,
5346 .get_module_eeprom = bnxt_get_module_eeprom,
5347 .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
5348 .nway_reset = bnxt_nway_reset,
5349 .set_phys_id = bnxt_set_phys_id,
5350 .self_test = bnxt_self_test,
5351 .get_ts_info = bnxt_get_ts_info,
5352 .reset = bnxt_reset,
5353 .set_dump = bnxt_set_dump,
5354 .get_dump_flag = bnxt_get_dump_flag,
5355 .get_dump_data = bnxt_get_dump_data,
5356 .get_eth_phy_stats = bnxt_get_eth_phy_stats,
5357 .get_eth_mac_stats = bnxt_get_eth_mac_stats,
5358 .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
5359 .get_rmon_stats = bnxt_get_rmon_stats,
5360 .get_ts_stats = bnxt_get_ptp_stats,