/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"      /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"        /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

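/* Report an NVM-related error both through the netlink extended ack
 * (when the caller provides one) and in the kernel log.
 */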
#define BNXT_NVM_ERR_MSG(dev, extack, msg)                      \
        do {                                                    \
                if (extack)                                     \
                        NL_SET_ERR_MSG_MOD(extack, msg);        \
                netdev_err(dev, "%s\n", msg);                   \
        } while (0)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);

        return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
        struct bnxt *bp = netdev_priv(dev);

        bp->msg_enable = value;
}

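/* ethtool .get_coalesce handler: report the driver's cached RX/TX
 * interrupt coalescing parameters, whether adaptive RX coalescing (DIM)
 * is enabled and whether CQE (timer reset) mode is in use.
 */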
static int bnxt_get_coalesce(struct net_device *dev,
                             struct ethtool_coalesce *coal,
                             struct kernel_ethtool_coalesce *kernel_coal,
                             struct netlink_ext_ack *extack)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_coal *hw_coal;
        u16 mult;

        memset(coal, 0, sizeof(*coal));

        coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

        hw_coal = &bp->rx_coal;
        mult = hw_coal->bufs_per_record;
        coal->rx_coalesce_usecs = hw_coal->coal_ticks;
        coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
        coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
        coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
        if (hw_coal->flags &
            RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
                kernel_coal->use_cqe_mode_rx = true;

        hw_coal = &bp->tx_coal;
        mult = hw_coal->bufs_per_record;
        coal->tx_coalesce_usecs = hw_coal->coal_ticks;
        coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
        coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
        coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
        if (hw_coal->flags &
            RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
                kernel_coal->use_cqe_mode_tx = true;

        coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

        return 0;
}

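/* ethtool .set_coalesce handler: update the cached coalescing parameters
 * and program them through firmware.  A full close/open cycle is only
 * needed when the statistics block interval changes.
 */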
static int bnxt_set_coalesce(struct net_device *dev,
                             struct ethtool_coalesce *coal,
                             struct kernel_ethtool_coalesce *kernel_coal,
                             struct netlink_ext_ack *extack)
{
        struct bnxt *bp = netdev_priv(dev);
        bool update_stats = false;
        struct bnxt_coal *hw_coal;
        int rc = 0;
        u16 mult;

        if (coal->use_adaptive_rx_coalesce) {
                bp->flags |= BNXT_FLAG_DIM;
        } else {
                if (bp->flags & BNXT_FLAG_DIM) {
                        bp->flags &= ~(BNXT_FLAG_DIM);
                        goto reset_coalesce;
                }
        }

        if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
            !(bp->coal_cap.cmpl_params &
              RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
                return -EOPNOTSUPP;

        hw_coal = &bp->rx_coal;
        mult = hw_coal->bufs_per_record;
        hw_coal->coal_ticks = coal->rx_coalesce_usecs;
        hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
        hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
        hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
        hw_coal->flags &=
                ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
        if (kernel_coal->use_cqe_mode_rx)
                hw_coal->flags |=
                        RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

        hw_coal = &bp->tx_coal;
        mult = hw_coal->bufs_per_record;
        hw_coal->coal_ticks = coal->tx_coalesce_usecs;
        hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
        hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
        hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
        hw_coal->flags &=
                ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
        if (kernel_coal->use_cqe_mode_tx)
                hw_coal->flags |=
                        RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

        if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
                u32 stats_ticks = coal->stats_block_coalesce_usecs;

                /* Allow 0, which means disable. */
                if (stats_ticks)
                        stats_ticks = clamp_t(u32, stats_ticks,
                                              BNXT_MIN_STATS_COAL_TICKS,
                                              BNXT_MAX_STATS_COAL_TICKS);
                stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
                bp->stats_coal_ticks = stats_ticks;
                if (bp->stats_coal_ticks)
                        bp->current_interval =
                                bp->stats_coal_ticks * HZ / 1000000;
                else
                        bp->current_interval = BNXT_TIMER_INTERVAL;
                update_stats = true;
        }

reset_coalesce:
        if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
                if (update_stats) {
                        bnxt_close_nic(bp, true, false);
                        rc = bnxt_open_nic(bp, true, false);
                } else {
                        rc = bnxt_hwrm_set_coal(bp);
                }
        }

        return rc;
}

static const char * const bnxt_ring_rx_stats_str[] = {
        "rx_ucast_packets",
        "rx_mcast_packets",
        "rx_bcast_packets",
        "rx_discards",
        "rx_errors",
        "rx_ucast_bytes",
        "rx_mcast_bytes",
        "rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
        "tx_ucast_packets",
        "tx_mcast_packets",
        "tx_bcast_packets",
        "tx_errors",
        "tx_discards",
        "tx_ucast_bytes",
        "tx_mcast_bytes",
        "tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
        "tpa_packets",
        "tpa_bytes",
        "tpa_events",
        "tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
        "rx_tpa_eligible_pkt",
        "rx_tpa_eligible_bytes",
        "rx_tpa_pkt",
        "rx_tpa_bytes",
        "rx_tpa_errors",
        "rx_tpa_events",
};

static const char * const bnxt_rx_sw_stats_str[] = {
        "rx_l4_csum_errors",
        "rx_resets",
        "rx_buf_errors",
};

static const char * const bnxt_cmn_sw_stats_str[] = {
        "missed_irqs",
};

#define BNXT_RX_STATS_ENTRY(counter)    \
        { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)    \
        { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)        \
        { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)        \
        { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)                          \
        BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),   \
        BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)                          \
        BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),   \
        BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES                           \
        BNXT_RX_STATS_EXT_PFC_ENTRY(0),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(1),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(2),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(3),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(4),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(5),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(6),                         \
        BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES                           \
        BNXT_TX_STATS_EXT_PFC_ENTRY(0),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(1),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(2),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(3),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(4),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(5),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(6),                         \
        BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)                          \
        BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),               \
        BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)                          \
        BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),               \
        BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES                           \
        BNXT_RX_STATS_EXT_COS_ENTRY(0),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(1),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(2),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(3),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(4),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(5),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(6),                         \
        BNXT_RX_STATS_EXT_COS_ENTRY(7)                          \

#define BNXT_TX_STATS_EXT_COS_ENTRIES                           \
        BNXT_TX_STATS_EXT_COS_ENTRY(0),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(1),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(2),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(3),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(4),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(5),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(6),                         \
        BNXT_TX_STATS_EXT_COS_ENTRY(7)                          \

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)                  \
        BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),       \
        BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES                           \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),                         \
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)             \
        { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),     \
          __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)             \
        { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),     \
          __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)              \
        BNXT_RX_STATS_PRI_ENTRY(counter, 0),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 1),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 2),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 3),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 4),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 5),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 6),            \
        BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)              \
        BNXT_TX_STATS_PRI_ENTRY(counter, 0),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 1),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 2),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 3),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 4),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 5),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 6),            \
        BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
        RX_TOTAL_DISCARDS,
        TX_TOTAL_DISCARDS,
        RX_NETPOLL_DISCARDS,
};

static const char *const bnxt_ring_err_stats_arr[] = {
        "rx_total_l4_csum_errors",
        "rx_total_resets",
        "rx_total_buf_errors",
        "rx_total_oom_discards",
        "rx_total_netpoll_discards",
        "rx_total_ring_discards",
        "tx_total_resets",
        "tx_total_ring_discards",
        "total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS            ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS           ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS            ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS            ARRAY_SIZE(bnxt_ring_tx_stats_str)

static const struct {
        long offset;
        char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
        BNXT_RX_STATS_ENTRY(rx_64b_frames),
        BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
        BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
        BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
        BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
        BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
        BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
        BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
        BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
        BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
        BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
        BNXT_RX_STATS_ENTRY(rx_total_frames),
        BNXT_RX_STATS_ENTRY(rx_ucast_frames),
        BNXT_RX_STATS_ENTRY(rx_mcast_frames),
        BNXT_RX_STATS_ENTRY(rx_bcast_frames),
        BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
        BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
        BNXT_RX_STATS_ENTRY(rx_pause_frames),
        BNXT_RX_STATS_ENTRY(rx_pfc_frames),
        BNXT_RX_STATS_ENTRY(rx_align_err_frames),
        BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
        BNXT_RX_STATS_ENTRY(rx_jbr_frames),
        BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
        BNXT_RX_STATS_ENTRY(rx_tagged_frames),
        BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
        BNXT_RX_STATS_ENTRY(rx_good_frames),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
        BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
        BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
        BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
        BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
        BNXT_RX_STATS_ENTRY(rx_bytes),
        BNXT_RX_STATS_ENTRY(rx_runt_bytes),
        BNXT_RX_STATS_ENTRY(rx_runt_frames),
        BNXT_RX_STATS_ENTRY(rx_stat_discard),
        BNXT_RX_STATS_ENTRY(rx_stat_err),

        BNXT_TX_STATS_ENTRY(tx_64b_frames),
        BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
        BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
        BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
        BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
        BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
        BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
        BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
        BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
        BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
        BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
        BNXT_TX_STATS_ENTRY(tx_good_frames),
        BNXT_TX_STATS_ENTRY(tx_total_frames),
        BNXT_TX_STATS_ENTRY(tx_ucast_frames),
        BNXT_TX_STATS_ENTRY(tx_mcast_frames),
        BNXT_TX_STATS_ENTRY(tx_bcast_frames),
        BNXT_TX_STATS_ENTRY(tx_pause_frames),
        BNXT_TX_STATS_ENTRY(tx_pfc_frames),
        BNXT_TX_STATS_ENTRY(tx_jabber_frames),
        BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
        BNXT_TX_STATS_ENTRY(tx_err),
        BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
        BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
        BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
        BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
        BNXT_TX_STATS_ENTRY(tx_total_collisions),
        BNXT_TX_STATS_ENTRY(tx_bytes),
        BNXT_TX_STATS_ENTRY(tx_xthol_frames),
        BNXT_TX_STATS_ENTRY(tx_stat_discard),
        BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
        long offset;
        char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
        BNXT_RX_STATS_EXT_ENTRY(link_down_events),
        BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
        BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
        BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
        BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
        BNXT_RX_STATS_EXT_COS_ENTRIES,
        BNXT_RX_STATS_EXT_PFC_ENTRIES,
        BNXT_RX_STATS_EXT_ENTRY(rx_bits),
        BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
        BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
        BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
        BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
        BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
        BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
        BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

static const struct {
        long offset;
        char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
        BNXT_TX_STATS_EXT_COS_ENTRIES,
        BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
        long base_off;
        char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
        BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
        long base_off;
        char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
        BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
        long base_off;
        char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
        BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
        long base_off;
        char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
        BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI                      \
        (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +    \
         ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +     \
         ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +    \
         ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
        if (BNXT_SUPPORTS_TPA(bp)) {
                if (bp->max_tpa_v2) {
                        if (BNXT_CHIP_P5(bp))
                                return BNXT_NUM_TPA_RING_STATS_P5;
                        return BNXT_NUM_TPA_RING_STATS_P7;
                }
                return BNXT_NUM_TPA_RING_STATS;
        }
        return 0;
}

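/* Total number of per-ring statistics: RX HW + RX SW + TPA counters for
 * each RX ring, TX HW counters for each TX ring and the common SW
 * counters for each completion ring.
 */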
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
        int rx, tx, cmn;

        rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
             bnxt_get_num_tpa_ring_stats(bp);
        tx = NUM_RING_TX_HW_STATS;
        cmn = NUM_RING_CMN_SW_STATS;
        return rx * bp->rx_nr_rings +
               tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
               cmn * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
        int num_stats = bnxt_get_num_ring_stats(bp);
        int len;

        num_stats += BNXT_NUM_RING_ERR_STATS;

        if (bp->flags & BNXT_FLAG_PORT_STATS)
                num_stats += BNXT_NUM_PORT_STATS;

        if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
                len = min_t(int, bp->fw_rx_stats_ext_size,
                            ARRAY_SIZE(bnxt_port_stats_ext_arr));
                num_stats += len;
                len = min_t(int, bp->fw_tx_stats_ext_size,
                            ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
                num_stats += len;
                if (bp->pri2cos_valid)
                        num_stats += BNXT_NUM_STATS_PRI;
        }

        return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
        struct bnxt *bp = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                return bnxt_get_num_stats(bp);
        case ETH_SS_TEST:
                if (!bp->num_tests)
                        return -EOPNOTSUPP;
                return bp->num_tests;
        default:
                return -EOPNOTSUPP;
        }
}

static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
        return ring_num < bp->rx_nr_rings;
}

static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
        int tx_base = 0;

        if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
                tx_base = bp->rx_nr_rings;

        if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
                return true;
        return false;
}

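/* ethtool .get_ethtool_stats handler: copy the per-ring HW/SW counters,
 * the accumulated ring error counters and, when supported, the port and
 * extended port statistics into @buf in the same order as the names
 * emitted by bnxt_get_strings().
 */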
static void bnxt_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *buf)
{
        struct bnxt_total_ring_err_stats ring_err_stats = {0};
        struct bnxt *bp = netdev_priv(dev);
        u64 *curr, *prev;
        u32 tpa_stats;
        u32 i, j = 0;

        if (!bp->bnapi) {
                j += bnxt_get_num_ring_stats(bp);
                goto skip_ring_stats;
        }

        tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
        for (i = 0; i < bp->cp_nr_rings; i++) {
                struct bnxt_napi *bnapi = bp->bnapi[i];
                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
                u64 *sw_stats = cpr->stats.sw_stats;
                u64 *sw;
                int k;

                if (is_rx_ring(bp, i)) {
                        for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
                                buf[j] = sw_stats[k];
                }
                if (is_tx_ring(bp, i)) {
                        k = NUM_RING_RX_HW_STATS;
                        for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
                               j++, k++)
                                buf[j] = sw_stats[k];
                }
                if (!tpa_stats || !is_rx_ring(bp, i))
                        goto skip_tpa_ring_stats;

                k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
                for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
                           tpa_stats; j++, k++)
                        buf[j] = sw_stats[k];

skip_tpa_ring_stats:
                sw = (u64 *)&cpr->sw_stats->rx;
                if (is_rx_ring(bp, i)) {
                        for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
                                buf[j] = sw[k];
                }

                sw = (u64 *)&cpr->sw_stats->cmn;
                for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
                        buf[j] = sw[k];
        }

        bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
        curr = &ring_err_stats.rx_total_l4_csum_errors;
        prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
        for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
                buf[j] = *curr + *prev;

        if (bp->flags & BNXT_FLAG_PORT_STATS) {
                u64 *port_stats = bp->port_stats.sw_stats;

                for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
                        buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
        }
        if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
                u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
                u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
                u32 len;

                len = min_t(u32, bp->fw_rx_stats_ext_size,
                            ARRAY_SIZE(bnxt_port_stats_ext_arr));
                for (i = 0; i < len; i++, j++) {
                        buf[j] = *(rx_port_stats_ext +
                                   bnxt_port_stats_ext_arr[i].offset);
                }
                len = min_t(u32, bp->fw_tx_stats_ext_size,
                            ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
                for (i = 0; i < len; i++, j++) {
                        buf[j] = *(tx_port_stats_ext +
                                   bnxt_tx_port_stats_ext_arr[i].offset);
                }
                if (bp->pri2cos_valid) {
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_rx_bytes_pri_arr[i].base_off +
                                         bp->pri2cos_idx[i];

                                buf[j] = *(rx_port_stats_ext + n);
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_rx_pkts_pri_arr[i].base_off +
                                         bp->pri2cos_idx[i];

                                buf[j] = *(rx_port_stats_ext + n);
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_tx_bytes_pri_arr[i].base_off +
                                         bp->pri2cos_idx[i];

                                buf[j] = *(tx_port_stats_ext + n);
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_tx_pkts_pri_arr[i].base_off +
                                         bp->pri2cos_idx[i];

                                buf[j] = *(tx_port_stats_ext + n);
                        }
                }
        }
}

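/* ethtool .get_strings handler: emit the statistics names in exactly the
 * order used by bnxt_get_ethtool_stats(), plus the self-test names for
 * ETH_SS_TEST.
 */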
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        struct bnxt *bp = netdev_priv(dev);
        static const char * const *str;
        u32 i, j, num_str;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < bp->cp_nr_rings; i++) {
                        if (is_rx_ring(bp, i)) {
                                num_str = NUM_RING_RX_HW_STATS;
                                for (j = 0; j < num_str; j++) {
                                        sprintf(buf, "[%d]: %s", i,
                                                bnxt_ring_rx_stats_str[j]);
                                        buf += ETH_GSTRING_LEN;
                                }
                        }
                        if (is_tx_ring(bp, i)) {
                                num_str = NUM_RING_TX_HW_STATS;
                                for (j = 0; j < num_str; j++) {
                                        sprintf(buf, "[%d]: %s", i,
                                                bnxt_ring_tx_stats_str[j]);
                                        buf += ETH_GSTRING_LEN;
                                }
                        }
                        num_str = bnxt_get_num_tpa_ring_stats(bp);
                        if (!num_str || !is_rx_ring(bp, i))
                                goto skip_tpa_stats;

                        if (bp->max_tpa_v2)
                                str = bnxt_ring_tpa2_stats_str;
                        else
                                str = bnxt_ring_tpa_stats_str;

                        for (j = 0; j < num_str; j++) {
                                sprintf(buf, "[%d]: %s", i, str[j]);
                                buf += ETH_GSTRING_LEN;
                        }
skip_tpa_stats:
                        if (is_rx_ring(bp, i)) {
                                num_str = NUM_RING_RX_SW_STATS;
                                for (j = 0; j < num_str; j++) {
                                        sprintf(buf, "[%d]: %s", i,
                                                bnxt_rx_sw_stats_str[j]);
                                        buf += ETH_GSTRING_LEN;
                                }
                        }
                        num_str = NUM_RING_CMN_SW_STATS;
                        for (j = 0; j < num_str; j++) {
                                sprintf(buf, "[%d]: %s", i,
                                        bnxt_cmn_sw_stats_str[j]);
                                buf += ETH_GSTRING_LEN;
                        }
                }
                for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
                        strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
                        buf += ETH_GSTRING_LEN;
                }

                if (bp->flags & BNXT_FLAG_PORT_STATS) {
                        for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
                                strcpy(buf, bnxt_port_stats_arr[i].string);
                                buf += ETH_GSTRING_LEN;
                        }
                }
                if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
                        u32 len;

                        len = min_t(u32, bp->fw_rx_stats_ext_size,
                                    ARRAY_SIZE(bnxt_port_stats_ext_arr));
                        for (i = 0; i < len; i++) {
                                strcpy(buf, bnxt_port_stats_ext_arr[i].string);
                                buf += ETH_GSTRING_LEN;
                        }
                        len = min_t(u32, bp->fw_tx_stats_ext_size,
                                    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
                        for (i = 0; i < len; i++) {
                                strcpy(buf,
                                       bnxt_tx_port_stats_ext_arr[i].string);
                                buf += ETH_GSTRING_LEN;
                        }
                        if (bp->pri2cos_valid) {
                                for (i = 0; i < 8; i++) {
                                        strcpy(buf,
                                               bnxt_rx_bytes_pri_arr[i].string);
                                        buf += ETH_GSTRING_LEN;
                                }
                                for (i = 0; i < 8; i++) {
                                        strcpy(buf,
                                               bnxt_rx_pkts_pri_arr[i].string);
                                        buf += ETH_GSTRING_LEN;
                                }
                                for (i = 0; i < 8; i++) {
                                        strcpy(buf,
                                               bnxt_tx_bytes_pri_arr[i].string);
                                        buf += ETH_GSTRING_LEN;
                                }
                                for (i = 0; i < 8; i++) {
                                        strcpy(buf,
                                               bnxt_tx_pkts_pri_arr[i].string);
                                        buf += ETH_GSTRING_LEN;
                                }
                        }
                }
                break;
        case ETH_SS_TEST:
                if (bp->num_tests)
                        memcpy(buf, bp->test_info->string,
                               bp->num_tests * ETH_GSTRING_LEN);
                break;
        default:
                netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
                           stringset);
                break;
        }
}

static void bnxt_get_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ering,
                               struct kernel_ethtool_ringparam *kernel_ering,
                               struct netlink_ext_ack *extack)
{
        struct bnxt *bp = netdev_priv(dev);

        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
                ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
                kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
        } else {
                ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
                ering->rx_jumbo_max_pending = 0;
                kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
        }
        ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

        ering->rx_pending = bp->rx_ring_size;
        ering->rx_jumbo_pending = bp->rx_agg_ring_size;
        ering->tx_pending = bp->tx_ring_size;
}

static int bnxt_set_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ering,
                              struct kernel_ethtool_ringparam *kernel_ering,
                              struct netlink_ext_ack *extack)
{
        struct bnxt *bp = netdev_priv(dev);

        if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
            (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
            (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
                return -EINVAL;

        if (netif_running(dev))
                bnxt_close_nic(bp, false, false);

        bp->rx_ring_size = ering->rx_pending;
        bp->tx_ring_size = ering->tx_pending;
        bnxt_set_ring_params(bp);

        if (netif_running(dev))
                return bnxt_open_nic(bp, false, false);

        return 0;
}

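/* ethtool .get_channels handler: report the maximum and current number
 * of combined, RX-only and TX-only rings, taking the TC count, XDP TX
 * rings and the firmware TX scheduler limit into account.
 */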
static void bnxt_get_channels(struct net_device *dev,
                              struct ethtool_channels *channel)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int max_rx_rings, max_tx_rings, tcs;
        int max_tx_sch_inputs, tx_grps;

        /* Get the most up-to-date max_tx_sch_inputs. */
        if (netif_running(dev) && BNXT_NEW_RM(bp))
                bnxt_hwrm_func_resc_qcaps(bp, false);
        max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

        bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
        if (max_tx_sch_inputs)
                max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

        tcs = bp->num_tc;
        tx_grps = max(tcs, 1);
        if (bp->tx_nr_rings_xdp)
                tx_grps++;
        max_tx_rings /= tx_grps;
        channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

        if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
                max_rx_rings = 0;
                max_tx_rings = 0;
        }
        if (max_tx_sch_inputs)
                max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

        if (tcs > 1)
                max_tx_rings /= tcs;

        channel->max_rx = max_rx_rings;
        channel->max_tx = max_tx_rings;
        channel->max_other = 0;
        if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
                channel->combined_count = bp->rx_nr_rings;
                if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                        channel->combined_count--;
        } else {
                if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
                        channel->rx_count = bp->rx_nr_rings;
                        channel->tx_count = bp->tx_nr_rings_per_tc;
                }
        }
}

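/* ethtool .set_channels handler: validate the requested ring layout,
 * reserve the rings and, if the interface is running, close and reopen
 * the NIC with the new configuration.
 */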
static int bnxt_set_channels(struct net_device *dev,
                             struct ethtool_channels *channel)
{
        struct bnxt *bp = netdev_priv(dev);
        int req_tx_rings, req_rx_rings, tcs;
        bool sh = false;
        int tx_xdp = 0;
        int rc = 0;
        int tx_cp;

        if (channel->other_count)
                return -EINVAL;

        if (!channel->combined_count &&
            (!channel->rx_count || !channel->tx_count))
                return -EINVAL;

        if (channel->combined_count &&
            (channel->rx_count || channel->tx_count))
                return -EINVAL;

        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
                                            channel->tx_count))
                return -EINVAL;

        if (channel->combined_count)
                sh = true;

        tcs = bp->num_tc;

        req_tx_rings = sh ? channel->combined_count : channel->tx_count;
        req_rx_rings = sh ? channel->combined_count : channel->rx_count;
        if (bp->tx_nr_rings_xdp) {
                if (!sh) {
                        netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
                        return -EINVAL;
                }
                tx_xdp = req_rx_rings;
        }

        if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
            bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
            netif_is_rxfh_configured(dev)) {
                netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
                return -EINVAL;
        }

        rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
        if (rc) {
                netdev_warn(dev, "Unable to allocate the requested rings\n");
                return rc;
        }

        if (netif_running(dev)) {
                if (BNXT_PF(bp)) {
                        /* TODO CHIMP_FW: Send message to all VFs
                         * before PF unload
                         */
                }
                bnxt_close_nic(bp, true, false);
        }

        if (sh) {
                bp->flags |= BNXT_FLAG_SHARED_RINGS;
                bp->rx_nr_rings = channel->combined_count;
                bp->tx_nr_rings_per_tc = channel->combined_count;
        } else {
                bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
                bp->rx_nr_rings = channel->rx_count;
                bp->tx_nr_rings_per_tc = channel->tx_count;
        }
        bp->tx_nr_rings_xdp = tx_xdp;
        bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
        if (tcs > 1)
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

        tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
        bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
                               tx_cp + bp->rx_nr_rings;

        /* After changing number of rx channels, update NTUPLE feature. */
        netdev_update_features(dev);
        if (netif_running(dev)) {
                rc = bnxt_open_nic(bp, true, false);
                if ((!rc) && BNXT_PF(bp)) {
                        /* TODO CHIMP_FW: Send message to all VFs
                         * to re-enable
                         */
                }
        } else {
                rc = bnxt_reserve_rings(bp, true);
        }

        return rc;
}

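/* Walk a filter hash table under RCU and collect the software IDs of all
 * active filters into @ids, starting at index @start.  Returns the next
 * free index in @ids.
 */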
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
                                     int tbl_size, u32 *ids, u32 start,
                                     u32 id_cnt)
{
        int i, j = start;

        if (j >= id_cnt)
                return j;
        for (i = 0; i < tbl_size; i++) {
                struct hlist_head *head;
                struct bnxt_filter_base *fltr;

                head = &tbl[i];
                hlist_for_each_entry_rcu(fltr, head, hash) {
                        if (!fltr->flags ||
                            test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
                                continue;
                        ids[j++] = fltr->sw_id;
                        if (j == id_cnt)
                                return j;
                }
        }
        return j;
}

static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
                                                      struct hlist_head tbl[],
                                                      int tbl_size, u32 id)
{
        int i;

        for (i = 0; i < tbl_size; i++) {
                struct hlist_head *head;
                struct bnxt_filter_base *fltr;

                head = &tbl[i];
                hlist_for_each_entry_rcu(fltr, head, hash) {
                        if (fltr->flags && fltr->sw_id == id)
                                return fltr;
                }
        }
        return NULL;
}

static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
                            u32 *rule_locs)
{
        u32 count;

        cmd->data = bp->ntp_fltr_count;
        rcu_read_lock();
        count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
                                          BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
                                          cmd->rule_cnt);
        cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
                                                  BNXT_NTP_FLTR_HASH_SIZE,
                                                  rule_locs, count,
                                                  cmd->rule_cnt);
        rcu_read_unlock();

        return 0;
}

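/* ETHTOOL_GRXCLSRULE: translate the L2 or ntuple filter identified by
 * fs->location back into an ethtool_rx_flow_spec.
 */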
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
        struct ethtool_rx_flow_spec *fs =
                (struct ethtool_rx_flow_spec *)&cmd->fs;
        struct bnxt_filter_base *fltr_base;
        struct bnxt_ntuple_filter *fltr;
        struct bnxt_flow_masks *fmasks;
        struct flow_keys *fkeys;
        int rc = -EINVAL;

        if (fs->location >= bp->max_fltr)
                return rc;

        rcu_read_lock();
        fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
                                          BNXT_L2_FLTR_HASH_SIZE,
                                          fs->location);
        if (fltr_base) {
                struct ethhdr *h_ether = &fs->h_u.ether_spec;
                struct ethhdr *m_ether = &fs->m_u.ether_spec;
                struct bnxt_l2_filter *l2_fltr;
                struct bnxt_l2_key *l2_key;

                l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
                l2_key = &l2_fltr->l2_key;
                fs->flow_type = ETHER_FLOW;
                ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
                eth_broadcast_addr(m_ether->h_dest);
                if (l2_key->vlan) {
                        struct ethtool_flow_ext *m_ext = &fs->m_ext;
                        struct ethtool_flow_ext *h_ext = &fs->h_ext;

                        fs->flow_type |= FLOW_EXT;
                        m_ext->vlan_tci = htons(0xfff);
                        h_ext->vlan_tci = htons(l2_key->vlan);
                }
                if (fltr_base->flags & BNXT_ACT_RING_DST)
                        fs->ring_cookie = fltr_base->rxq;
                if (fltr_base->flags & BNXT_ACT_FUNC_DST)
                        fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
                                          ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
                rcu_read_unlock();
                return 0;
        }
        fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
                                          BNXT_NTP_FLTR_HASH_SIZE,
                                          fs->location);
        if (!fltr_base) {
                rcu_read_unlock();
                return rc;
        }
        fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

        fkeys = &fltr->fkeys;
        fmasks = &fltr->fmasks;
        if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
                if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
                    fkeys->basic.ip_proto == IPPROTO_RAW) {
                        fs->flow_type = IP_USER_FLOW;
                        fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
                        if (fkeys->basic.ip_proto == IPPROTO_ICMP)
                                fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
                        else
                                fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
                        fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
                } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
                        fs->flow_type = TCP_V4_FLOW;
                } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
                        fs->flow_type = UDP_V4_FLOW;
                } else {
                        goto fltr_err;
                }

                fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
                fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
                fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
                fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
                if (fs->flow_type == TCP_V4_FLOW ||
                    fs->flow_type == UDP_V4_FLOW) {
                        fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
                        fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
                        fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
                        fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
                }
        } else {
                if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
                    fkeys->basic.ip_proto == IPPROTO_RAW) {
                        fs->flow_type = IPV6_USER_FLOW;
                        if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
                                fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
                        else
                                fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
                        fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
                } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
                        fs->flow_type = TCP_V6_FLOW;
                } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
                        fs->flow_type = UDP_V6_FLOW;
                } else {
                        goto fltr_err;
                }

                *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
                        fkeys->addrs.v6addrs.src;
                *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
                        fmasks->addrs.v6addrs.src;
                *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
                        fkeys->addrs.v6addrs.dst;
                *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
                        fmasks->addrs.v6addrs.dst;
                if (fs->flow_type == TCP_V6_FLOW ||
                    fs->flow_type == UDP_V6_FLOW) {
                        fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
                        fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
                        fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
                        fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
                }
        }

        if (fltr->base.flags & BNXT_ACT_DROP)
                fs->ring_cookie = RX_CLS_FLOW_DISC;
        else
                fs->ring_cookie = fltr->base.rxq;
        rc = 0;

fltr_err:
        rcu_read_unlock();

        return rc;
}

static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
                                                        u32 index)
{
        struct ethtool_rxfh_context *ctx;

        ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
        if (!ctx)
                return NULL;
        return ethtool_rxfh_context_priv(ctx);
}

static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
                                     struct bnxt_vnic_info *vnic)
{
        int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

        vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
        vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
                                             vnic->rss_table_size,
                                             &vnic->rss_table_dma_addr,
                                             GFP_KERNEL);
        if (!vnic->rss_table)
                return -ENOMEM;

        vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
        vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
        return 0;
}

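/* Add an L2 classification rule (destination MAC plus optional VLAN)
 * steering to a ring or to a VF.  Not supported on P5 and later chips.
 */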
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
                                struct ethtool_rx_flow_spec *fs)
{
        u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
        u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
        struct ethhdr *h_ether = &fs->h_u.ether_spec;
        struct ethhdr *m_ether = &fs->m_u.ether_spec;
        struct bnxt_l2_filter *fltr;
        struct bnxt_l2_key key;
        u16 vnic_id;
        u8 flags;
        int rc;

        if (BNXT_CHIP_P5_PLUS(bp))
                return -EOPNOTSUPP;

        if (!is_broadcast_ether_addr(m_ether->h_dest))
                return -EINVAL;
        ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
        key.vlan = 0;
        if (fs->flow_type & FLOW_EXT) {
                struct ethtool_flow_ext *m_ext = &fs->m_ext;
                struct ethtool_flow_ext *h_ext = &fs->h_ext;

                if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
                        return -EINVAL;
                key.vlan = ntohs(h_ext->vlan_tci);
        }

        if (vf) {
                flags = BNXT_ACT_FUNC_DST;
                vnic_id = 0xffff;
                vf--;
        } else {
                flags = BNXT_ACT_RING_DST;
                vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
        }
        fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
        if (IS_ERR(fltr))
                return PTR_ERR(fltr);

        fltr->base.fw_vnic_id = vnic_id;
        fltr->base.rxq = ring;
        fltr->base.vf_idx = vf;
        rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
        if (rc)
                bnxt_del_l2_filter(bp, fltr);
        else
                fs->location = fltr->base.sw_id;
        return rc;
}

static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
                                        struct ethtool_usrip4_spec *ip_mask)
{
        if (ip_mask->l4_4_bytes || ip_mask->tos ||
            ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
            ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
            (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
                return false;
        return true;
}

static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
                                        struct ethtool_usrip6_spec *ip_mask)
{
        if (ip_mask->l4_4_bytes || ip_mask->tclass ||
            ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
            (ip_spec->l4_proto != IPPROTO_RAW &&
             ip_spec->l4_proto != IPPROTO_ICMPV6))
                return false;
        return true;
}

1311 static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
1312                                     struct ethtool_rxnfc *cmd)
1313 {
1314         struct ethtool_rx_flow_spec *fs = &cmd->fs;
1315         struct bnxt_ntuple_filter *new_fltr, *fltr;
1316         u32 flow_type = fs->flow_type & 0xff;
1317         struct bnxt_l2_filter *l2_fltr;
1318         struct bnxt_flow_masks *fmasks;
1319         struct flow_keys *fkeys;
1320         u32 idx, ring;
1321         int rc;
1322         u8 vf;
1323
1324         if (!bp->vnic_info)
1325                 return -EAGAIN;
1326
1327         vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1328         ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
1329         if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
1330                 return -EOPNOTSUPP;
1331
1332         if (flow_type == IP_USER_FLOW) {
1333                 if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
1334                                                  &fs->m_u.usr_ip4_spec))
1335                         return -EOPNOTSUPP;
1336         }
1337
1338         if (flow_type == IPV6_USER_FLOW) {
1339                 if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
1340                                                  &fs->m_u.usr_ip6_spec))
1341                         return -EOPNOTSUPP;
1342         }
1343
1344         new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
1345         if (!new_fltr)
1346                 return -ENOMEM;
1347
1348         l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
1349         atomic_inc(&l2_fltr->refcnt);
1350         new_fltr->l2_fltr = l2_fltr;
1351         fmasks = &new_fltr->fmasks;
1352         fkeys = &new_fltr->fkeys;
1353
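             /* Flow types not handled below fall through to ntuple_err with
              * -EOPNOTSUPP.
              */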
1354         rc = -EOPNOTSUPP;
1355         switch (flow_type) {
1356         case IP_USER_FLOW: {
1357                 struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
1358                 struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
1359
1360                 fkeys->basic.ip_proto = ip_spec->proto;
1361                 fkeys->basic.n_proto = htons(ETH_P_IP);
1362                 fkeys->addrs.v4addrs.src = ip_spec->ip4src;
1363                 fmasks->addrs.v4addrs.src = ip_mask->ip4src;
1364                 fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
1365                 fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
1366                 break;
1367         }
1368         case TCP_V4_FLOW:
1369         case UDP_V4_FLOW: {
1370                 struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
1371                 struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
1372
1373                 fkeys->basic.ip_proto = IPPROTO_TCP;
1374                 if (flow_type == UDP_V4_FLOW)
1375                         fkeys->basic.ip_proto = IPPROTO_UDP;
1376                 fkeys->basic.n_proto = htons(ETH_P_IP);
1377                 fkeys->addrs.v4addrs.src = ip_spec->ip4src;
1378                 fmasks->addrs.v4addrs.src = ip_mask->ip4src;
1379                 fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
1380                 fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
1381                 fkeys->ports.src = ip_spec->psrc;
1382                 fmasks->ports.src = ip_mask->psrc;
1383                 fkeys->ports.dst = ip_spec->pdst;
1384                 fmasks->ports.dst = ip_mask->pdst;
1385                 break;
1386         }
1387         case IPV6_USER_FLOW: {
1388                 struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
1389                 struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
1390
1391                 fkeys->basic.ip_proto = ip_spec->l4_proto;
1392                 fkeys->basic.n_proto = htons(ETH_P_IPV6);
1393                 fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
1394                 fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
1395                 fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
1396                 fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
1397                 break;
1398         }
1399         case TCP_V6_FLOW:
1400         case UDP_V6_FLOW: {
1401                 struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
1402                 struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
1403
1404                 fkeys->basic.ip_proto = IPPROTO_TCP;
1405                 if (flow_type == UDP_V6_FLOW)
1406                         fkeys->basic.ip_proto = IPPROTO_UDP;
1407                 fkeys->basic.n_proto = htons(ETH_P_IPV6);
1408
1409                 fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
1410                 fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
1411                 fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
1412                 fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
1413                 fkeys->ports.src = ip_spec->psrc;
1414                 fmasks->ports.src = ip_mask->psrc;
1415                 fkeys->ports.dst = ip_spec->pdst;
1416                 fmasks->ports.dst = ip_mask->pdst;
1417                 break;
1418         }
1419         default:
1420                 rc = -EOPNOTSUPP;
1421                 goto ntuple_err;
1422         }
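             /* Reject a rule whose masks are all zero: it would not match on
              * any field.
              */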
1423         if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
1424                 goto ntuple_err;
1425
1426         idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
1427         rcu_read_lock();
1428         fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
1429         if (fltr) {
1430                 rcu_read_unlock();
1431                 rc = -EEXIST;
1432                 goto ntuple_err;
1433         }
1434         rcu_read_unlock();
1435
1436         new_fltr->base.flags = BNXT_ACT_NO_AGING;
1437         if (fs->flow_type & FLOW_RSS) {
1438                 struct bnxt_rss_ctx *rss_ctx;
1439
1440                 new_fltr->base.fw_vnic_id = 0;
1441                 new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
1442                 rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
1443                 if (rss_ctx) {
1444                         new_fltr->base.fw_vnic_id = rss_ctx->index;
1445                 } else {
1446                         rc = -EINVAL;
1447                         goto ntuple_err;
1448                 }
1449         }
1450         if (fs->ring_cookie == RX_CLS_FLOW_DISC)
1451                 new_fltr->base.flags |= BNXT_ACT_DROP;
1452         else
1453                 new_fltr->base.rxq = ring;
1454         __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
1455         rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
1456         if (!rc) {
1457                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
1458                 if (rc) {
1459                         bnxt_del_ntp_filter(bp, new_fltr);
1460                         return rc;
1461                 }
1462                 fs->location = new_fltr->base.sw_id;
1463                 return 0;
1464         }
1465
1466 ntuple_err:
1467         atomic_dec(&l2_fltr->refcnt);
1468         kfree(new_fltr);
1469         return rc;
1470 }
1471
1472 static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1473 {
1474         struct ethtool_rx_flow_spec *fs = &cmd->fs;
1475         u32 ring, flow_type;
1476         int rc;
1477         u8 vf;
1478
1479         if (!netif_running(bp->dev))
1480                 return -EAGAIN;
1481         if (!(bp->flags & BNXT_FLAG_RFS))
1482                 return -EPERM;
1483         if (fs->location != RX_CLS_LOC_ANY)
1484                 return -EINVAL;
1485
1486         flow_type = fs->flow_type;
1487         if ((flow_type == IP_USER_FLOW ||
1488              flow_type == IPV6_USER_FLOW) &&
1489             !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
1490                 return -EOPNOTSUPP;
1491         if (flow_type & FLOW_MAC_EXT)
1492                 return -EINVAL;
1493         flow_type &= ~FLOW_EXT;
1494
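             /* A drop rule has no destination ring or VF to validate, so hand
              * non-L2 drop rules straight to the ntuple path.
              */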
1495         if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
1496                 return bnxt_add_ntuple_cls_rule(bp, cmd);
1497
1498         ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
1499         vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1500         if (BNXT_VF(bp) && vf)
1501                 return -EINVAL;
1502         if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
1503                 return -EINVAL;
1504         if (!vf && ring >= bp->rx_nr_rings)
1505                 return -EINVAL;
1506
1507         if (flow_type == ETHER_FLOW)
1508                 rc = bnxt_add_l2_cls_rule(bp, fs);
1509         else
1510                 rc = bnxt_add_ntuple_cls_rule(bp, cmd);
1511         return rc;
1512 }
1513
1514 static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1515 {
1516         struct ethtool_rx_flow_spec *fs = &cmd->fs;
1517         struct bnxt_filter_base *fltr_base;
1518         struct bnxt_ntuple_filter *fltr;
1519         u32 id = fs->location;
1520
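             /* Rule IDs are shared between L2 and ntuple filters: search the
              * L2 hash table first, then the ntuple hash table.
              */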
1521         rcu_read_lock();
1522         fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
1523                                           BNXT_L2_FLTR_HASH_SIZE, id);
1524         if (fltr_base) {
1525                 struct bnxt_l2_filter *l2_fltr;
1526
1527                 l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
1528                 rcu_read_unlock();
1529                 bnxt_hwrm_l2_filter_free(bp, l2_fltr);
1530                 bnxt_del_l2_filter(bp, l2_fltr);
1531                 return 0;
1532         }
1533         fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
1534                                           BNXT_NTP_FLTR_HASH_SIZE, id);
1535         if (!fltr_base) {
1536                 rcu_read_unlock();
1537                 return -ENOENT;
1538         }
1539
1540         fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
1541         if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
1542                 rcu_read_unlock();
1543                 return -EINVAL;
1544         }
1545         rcu_read_unlock();
1546         bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
1547         bnxt_del_ntp_filter(bp, fltr);
1548         return 0;
1549 }
1550
1551 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1552 {
1553         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1554                 return RXH_IP_SRC | RXH_IP_DST;
1555         return 0;
1556 }
1557
1558 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1559 {
1560         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1561                 return RXH_IP_SRC | RXH_IP_DST;
1562         return 0;
1563 }
1564
1565 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1566 {
1567         cmd->data = 0;
1568         switch (cmd->flow_type) {
1569         case TCP_V4_FLOW:
1570                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1571                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1572                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1573                 cmd->data |= get_ethtool_ipv4_rss(bp);
1574                 break;
1575         case UDP_V4_FLOW:
1576                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1577                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1578                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1579                 fallthrough;
1580         case AH_ESP_V4_FLOW:
1581                 if (bp->rss_hash_cfg &
1582                     (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
1583                      VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
1584                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1585                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1586                 fallthrough;
1587         case SCTP_V4_FLOW:
1588         case AH_V4_FLOW:
1589         case ESP_V4_FLOW:
1590         case IPV4_FLOW:
1591                 cmd->data |= get_ethtool_ipv4_rss(bp);
1592                 break;
1593
1594         case TCP_V6_FLOW:
1595                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1596                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1597                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1598                 cmd->data |= get_ethtool_ipv6_rss(bp);
1599                 break;
1600         case UDP_V6_FLOW:
1601                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1602                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1603                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1604                 fallthrough;
1605         case AH_ESP_V6_FLOW:
1606                 if (bp->rss_hash_cfg &
1607                     (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
1608                      VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
1609                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1610                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1611                 fallthrough;
1612         case SCTP_V6_FLOW:
1613         case AH_V6_FLOW:
1614         case ESP_V6_FLOW:
1615         case IPV6_FLOW:
1616                 cmd->data |= get_ethtool_ipv6_rss(bp);
1617                 break;
1618         }
1619         return 0;
1620 }
1621
1622 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1623 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1624
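     /* A 4-tuple request hashes on the IP addresses plus the L4 ports, a
      * 2-tuple request on the IP addresses only; any other combination is
      * rejected.
      */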
1625 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1626 {
1627         u32 rss_hash_cfg = bp->rss_hash_cfg;
1628         int tuple, rc = 0;
1629
1630         if (cmd->data == RXH_4TUPLE)
1631                 tuple = 4;
1632         else if (cmd->data == RXH_2TUPLE)
1633                 tuple = 2;
1634         else if (!cmd->data)
1635                 tuple = 0;
1636         else
1637                 return -EINVAL;
1638
1639         if (cmd->flow_type == TCP_V4_FLOW) {
1640                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1641                 if (tuple == 4)
1642                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1643         } else if (cmd->flow_type == UDP_V4_FLOW) {
1644                 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
1645                         return -EINVAL;
1646                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1647                 if (tuple == 4)
1648                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1649         } else if (cmd->flow_type == TCP_V6_FLOW) {
1650                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1651                 if (tuple == 4)
1652                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1653         } else if (cmd->flow_type == UDP_V6_FLOW) {
1654                 if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
1655                         return -EINVAL;
1656                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1657                 if (tuple == 4)
1658                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1659         } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
1660                 if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
1661                                    !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
1662                         return -EINVAL;
1663                 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
1664                                   VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
1665                 if (tuple == 4)
1666                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
1667                                         VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
1668         } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
1669                 if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
1670                                    !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
1671                         return -EINVAL;
1672                 rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
1673                                   VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
1674                 if (tuple == 4)
1675                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
1676                                         VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
1677         } else if (tuple == 4) {
1678                 return -EINVAL;
1679         }
1680
1681         switch (cmd->flow_type) {
1682         case TCP_V4_FLOW:
1683         case UDP_V4_FLOW:
1684         case SCTP_V4_FLOW:
1685         case AH_ESP_V4_FLOW:
1686         case AH_V4_FLOW:
1687         case ESP_V4_FLOW:
1688         case IPV4_FLOW:
1689                 if (tuple == 2)
1690                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1691                 else if (!tuple)
1692                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1693                 break;
1694
1695         case TCP_V6_FLOW:
1696         case UDP_V6_FLOW:
1697         case SCTP_V6_FLOW:
1698         case AH_ESP_V6_FLOW:
1699         case AH_V6_FLOW:
1700         case ESP_V6_FLOW:
1701         case IPV6_FLOW:
1702                 if (tuple == 2)
1703                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1704                 else if (!tuple)
1705                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1706                 break;
1707         }
1708
1709         if (bp->rss_hash_cfg == rss_hash_cfg)
1710                 return 0;
1711
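             /* Record which hash-type bits changed so firmware that supports
              * delta updates can apply just the difference.
              */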
1712         if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
1713                 bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
1714         bp->rss_hash_cfg = rss_hash_cfg;
1715         if (netif_running(bp->dev)) {
1716                 bnxt_close_nic(bp, false, false);
1717                 rc = bnxt_open_nic(bp, false, false);
1718         }
1719         return rc;
1720 }
1721
1722 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1723                           u32 *rule_locs)
1724 {
1725         struct bnxt *bp = netdev_priv(dev);
1726         int rc = 0;
1727
1728         switch (cmd->cmd) {
1729         case ETHTOOL_GRXRINGS:
1730                 cmd->data = bp->rx_nr_rings;
1731                 break;
1732
1733         case ETHTOOL_GRXCLSRLCNT:
1734                 cmd->rule_cnt = bp->ntp_fltr_count;
1735                 cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
1736                 break;
1737
1738         case ETHTOOL_GRXCLSRLALL:
1739                 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1740                 break;
1741
1742         case ETHTOOL_GRXCLSRULE:
1743                 rc = bnxt_grxclsrule(bp, cmd);
1744                 break;
1745
1746         case ETHTOOL_GRXFH:
1747                 rc = bnxt_grxfh(bp, cmd);
1748                 break;
1749
1750         default:
1751                 rc = -EOPNOTSUPP;
1752                 break;
1753         }
1754
1755         return rc;
1756 }
1757
1758 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1759 {
1760         struct bnxt *bp = netdev_priv(dev);
1761         int rc;
1762
1763         switch (cmd->cmd) {
1764         case ETHTOOL_SRXFH:
1765                 rc = bnxt_srxfh(bp, cmd);
1766                 break;
1767
1768         case ETHTOOL_SRXCLSRLINS:
1769                 rc = bnxt_srxclsrlins(bp, cmd);
1770                 break;
1771
1772         case ETHTOOL_SRXCLSRLDEL:
1773                 rc = bnxt_srxclsrldel(bp, cmd);
1774                 break;
1775
1776         default:
1777                 rc = -EOPNOTSUPP;
1778                 break;
1779         }
1780         return rc;
1781 }
1782
1783 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1784 {
1785         struct bnxt *bp = netdev_priv(dev);
1786
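             /* P5+ chips size the indirection table by the number of RSS
              * contexts needed for the RX rings; older chips use a fixed-size
              * table.
              */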
1787         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1788                 return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
1789                        BNXT_RSS_TABLE_ENTRIES_P5;
1790         return HW_HASH_INDEX_SIZE;
1791 }
1792
1793 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1794 {
1795         return HW_HASH_KEY_SIZE;
1796 }
1797
1798 static int bnxt_get_rxfh(struct net_device *dev,
1799                          struct ethtool_rxfh_param *rxfh)
1800 {
1801         struct bnxt_rss_ctx *rss_ctx = NULL;
1802         struct bnxt *bp = netdev_priv(dev);
1803         u32 *indir_tbl = bp->rss_indir_tbl;
1804         struct bnxt_vnic_info *vnic;
1805         u32 i, tbl_size;
1806
1807         rxfh->hfunc = ETH_RSS_HASH_TOP;
1808
1809         if (!bp->vnic_info)
1810                 return 0;
1811
1812         vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
1813         if (rxfh->rss_context) {
1814                 struct ethtool_rxfh_context *ctx;
1815
1816                 ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
1817                 if (!ctx)
1818                         return -EINVAL;
1819                 indir_tbl = ethtool_rxfh_context_indir(ctx);
1820                 rss_ctx = ethtool_rxfh_context_priv(ctx);
1821                 vnic = &rss_ctx->vnic;
1822         }
1823
1824         if (rxfh->indir && indir_tbl) {
1825                 tbl_size = bnxt_get_rxfh_indir_size(dev);
1826                 for (i = 0; i < tbl_size; i++)
1827                         rxfh->indir[i] = indir_tbl[i];
1828         }
1829
1830         if (rxfh->key && vnic->rss_hash_key)
1831                 memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1832
1833         return 0;
1834 }
1835
1836 static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
1837                             struct bnxt_rss_ctx *rss_ctx,
1838                             const struct ethtool_rxfh_param *rxfh)
1839 {
1840         if (rxfh->key) {
1841                 if (rss_ctx) {
1842                         memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
1843                                HW_HASH_KEY_SIZE);
1844                 } else {
1845                         memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
1846                         bp->rss_hash_key_updated = true;
1847                 }
1848         }
1849         if (rxfh->indir) {
1850                 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
1851                 u32 *indir_tbl = bp->rss_indir_tbl;
1852
1853                 if (rss_ctx)
1854                         indir_tbl = ethtool_rxfh_context_indir(ctx);
1855                 for (i = 0; i < tbl_size; i++)
1856                         indir_tbl[i] = rxfh->indir[i];
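                     /* Zero any unused entries beyond the active table size. */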
1857                 pad = bp->rss_indir_tbl_entries - tbl_size;
1858                 if (pad)
1859                         memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
1860         }
1861 }
1862
1863 static int bnxt_rxfh_context_check(struct bnxt *bp,
1864                                    const struct ethtool_rxfh_param *rxfh,
1865                                    struct netlink_ext_ack *extack)
1866 {
1867         if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1868                 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1869                 return -EOPNOTSUPP;
1870         }
1871
1872         if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
1873                 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
1874                 return -EOPNOTSUPP;
1875         }
1876
1877         if (!netif_running(bp->dev)) {
1878                 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when the interface is down");
1879                 return -EAGAIN;
1880         }
1881
1882         return 0;
1883 }
1884
1885 static int bnxt_create_rxfh_context(struct net_device *dev,
1886                                     struct ethtool_rxfh_context *ctx,
1887                                     const struct ethtool_rxfh_param *rxfh,
1888                                     struct netlink_ext_ack *extack)
1889 {
1890         struct bnxt *bp = netdev_priv(dev);
1891         struct bnxt_rss_ctx *rss_ctx;
1892         struct bnxt_vnic_info *vnic;
1893         int rc;
1894
1895         rc = bnxt_rxfh_context_check(bp, rxfh, extack);
1896         if (rc)
1897                 return rc;
1898
1899         if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
1900                 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
1901                                        BNXT_MAX_ETH_RSS_CTX);
1902                 return -EINVAL;
1903         }
1904
1905         if (!bnxt_rfs_capable(bp, true)) {
1906                 NL_SET_ERR_MSG_MOD(extack, "Out of hardware resources");
1907                 return -ENOMEM;
1908         }
1909
1910         rss_ctx = ethtool_rxfh_context_priv(ctx);
1911
1912         bp->num_rss_ctx++;
1913
1914         vnic = &rss_ctx->vnic;
1915         vnic->rss_ctx = ctx;
1916         vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
1917         vnic->vnic_id = BNXT_VNIC_ID_INVALID;
1918         rc = bnxt_alloc_vnic_rss_table(bp, vnic);
1919         if (rc)
1920                 goto out;
1921
1922         /* Populate defaults in the context */
1923         bnxt_set_dflt_rss_indir_tbl(bp, ctx);
1924         ctx->hfunc = ETH_RSS_HASH_TOP;
1925         memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
1926         memcpy(ethtool_rxfh_context_key(ctx),
1927                bp->rss_hash_key, HW_HASH_KEY_SIZE);
1928
1929         rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
1930         if (rc) {
1931                 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
1932                 goto out;
1933         }
1934
1935         rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
1936         if (rc) {
1937                 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
1938                 goto out;
1939         }
1940         bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
1941
1942         rc = __bnxt_setup_vnic_p5(bp, vnic);
1943         if (rc) {
1944                 NL_SET_ERR_MSG_MOD(extack, "Unable to setup VNIC");
1945                 goto out;
1946         }
1947
1948         rss_ctx->index = rxfh->rss_context;
1949         return 0;
1950 out:
1951         bnxt_del_one_rss_ctx(bp, rss_ctx, true);
1952         return rc;
1953 }
1954
1955 static int bnxt_modify_rxfh_context(struct net_device *dev,
1956                                     struct ethtool_rxfh_context *ctx,
1957                                     const struct ethtool_rxfh_param *rxfh,
1958                                     struct netlink_ext_ack *extack)
1959 {
1960         struct bnxt *bp = netdev_priv(dev);
1961         struct bnxt_rss_ctx *rss_ctx;
1962         int rc;
1963
1964         rc = bnxt_rxfh_context_check(bp, rxfh, extack);
1965         if (rc)
1966                 return rc;
1967
1968         rss_ctx = ethtool_rxfh_context_priv(ctx);
1969
1970         bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
1971
1972         return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
1973 }
1974
1975 static int bnxt_remove_rxfh_context(struct net_device *dev,
1976                                     struct ethtool_rxfh_context *ctx,
1977                                     u32 rss_context,
1978                                     struct netlink_ext_ack *extack)
1979 {
1980         struct bnxt *bp = netdev_priv(dev);
1981         struct bnxt_rss_ctx *rss_ctx;
1982
1983         rss_ctx = ethtool_rxfh_context_priv(ctx);
1984
1985         bnxt_del_one_rss_ctx(bp, rss_ctx, true);
1986         return 0;
1987 }
1988
1989 static int bnxt_set_rxfh(struct net_device *dev,
1990                          struct ethtool_rxfh_param *rxfh,
1991                          struct netlink_ext_ack *extack)
1992 {
1993         struct bnxt *bp = netdev_priv(dev);
1994         int rc = 0;
1995
1996         if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
1997                 return -EOPNOTSUPP;
1998
1999         bnxt_modify_rss(bp, NULL, NULL, rxfh);
2000
2001         if (netif_running(bp->dev)) {
2002                 bnxt_close_nic(bp, false, false);
2003                 rc = bnxt_open_nic(bp, false, false);
2004         }
2005         return rc;
2006 }
2007
2008 static void bnxt_get_drvinfo(struct net_device *dev,
2009                              struct ethtool_drvinfo *info)
2010 {
2011         struct bnxt *bp = netdev_priv(dev);
2012
2013         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
2014         strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
2015         strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
2016         info->n_stats = bnxt_get_num_stats(bp);
2017         info->testinfo_len = bp->num_tests;
2018         /* TODO CHIMP_FW: eeprom dump details */
2019         info->eedump_len = 0;
2020         /* TODO CHIMP_FW: reg dump details */
2021         info->regdump_len = 0;
2022 }
2023
2024 static int bnxt_get_regs_len(struct net_device *dev)
2025 {
2026         struct bnxt *bp = netdev_priv(dev);
2027         int reg_len;
2028
2029         if (!BNXT_PF(bp))
2030                 return -EOPNOTSUPP;
2031
2032         reg_len = BNXT_PXP_REG_LEN;
2033
2034         if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
2035                 reg_len += sizeof(struct pcie_ctx_hw_stats);
2036
2037         return reg_len;
2038 }
2039
2040 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2041                           void *_p)
2042 {
2043         struct pcie_ctx_hw_stats *hw_pcie_stats;
2044         struct hwrm_pcie_qstats_input *req;
2045         struct bnxt *bp = netdev_priv(dev);
2046         dma_addr_t hw_pcie_stats_addr;
2047         int rc;
2048
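             /* Dump the fixed PXP register block first; the version is bumped
              * to 1 only if the PCIe statistics are successfully appended
              * after it.
              */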
2049         regs->version = 0;
2050         bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
2051
2052         if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
2053                 return;
2054
2055         if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
2056                 return;
2057
2058         hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
2059                                            &hw_pcie_stats_addr);
2060         if (!hw_pcie_stats) {
2061                 hwrm_req_drop(bp, req);
2062                 return;
2063         }
2064
2065         regs->version = 1;
2066         hwrm_req_hold(bp, req); /* hold on to slice */
2067         req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
2068         req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
2069         rc = hwrm_req_send(bp, req);
2070         if (!rc) {
2071                 __le64 *src = (__le64 *)hw_pcie_stats;
2072                 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
2073                 int i;
2074
2075                 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
2076                         dst[i] = le64_to_cpu(src[i]);
2077         }
2078         hwrm_req_drop(bp, req);
2079 }
2080
2081 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2082 {
2083         struct bnxt *bp = netdev_priv(dev);
2084
2085         wol->supported = 0;
2086         wol->wolopts = 0;
2087         memset(&wol->sopass, 0, sizeof(wol->sopass));
2088         if (bp->flags & BNXT_FLAG_WOL_CAP) {
2089                 wol->supported = WAKE_MAGIC;
2090                 if (bp->wol)
2091                         wol->wolopts = WAKE_MAGIC;
2092         }
2093 }
2094
2095 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2096 {
2097         struct bnxt *bp = netdev_priv(dev);
2098
2099         if (wol->wolopts & ~WAKE_MAGIC)
2100                 return -EINVAL;
2101
2102         if (wol->wolopts & WAKE_MAGIC) {
2103                 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
2104                         return -EINVAL;
2105                 if (!bp->wol) {
2106                         if (bnxt_hwrm_alloc_wol_fltr(bp))
2107                                 return -EBUSY;
2108                         bp->wol = 1;
2109                 }
2110         } else {
2111                 if (bp->wol) {
2112                         if (bnxt_hwrm_free_wol_fltr(bp))
2113                                 return -EBUSY;
2114                         bp->wol = 0;
2115                 }
2116         }
2117         return 0;
2118 }
2119
2120 /* TODO: support 25GB, 40GB, 50GB with different cable type */
2121 void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
2122 {
2123         linkmode_zero(mode);
2124
2125         if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
2126                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
2127         if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
2128                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
2129         if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
2130                 linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
2131         if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
2132                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
2133         if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
2134                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
2135 }
2136
2137 enum bnxt_media_type {
2138         BNXT_MEDIA_UNKNOWN = 0,
2139         BNXT_MEDIA_TP,
2140         BNXT_MEDIA_CR,
2141         BNXT_MEDIA_SR,
2142         BNXT_MEDIA_LR_ER_FR,
2143         BNXT_MEDIA_KR,
2144         BNXT_MEDIA_KX,
2145         BNXT_MEDIA_X,
2146         __BNXT_MEDIA_END,
2147 };
2148
2149 static const enum bnxt_media_type bnxt_phy_types[] = {
2150         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
2151         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
2152         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
2153         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
2154         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
2155         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
2156         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
2157         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
2158         [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
2159         [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
2160         [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
2161         [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
2162         [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
2163         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
2164         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
2165         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2166         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2167         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
2168         [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
2169         [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
2170         [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2171         [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2172         [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
2173         [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
2174         [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
2175         [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
2176         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
2177         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
2178         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2179         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2180         [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
2181         [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
2182         [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
2183         [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
2184         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
2185         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
2186         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
2187         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
2188         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
2189         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
2190         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
2191         [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
2192         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
2193         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
2194         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
2195         [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
2196         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
2197         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
2198         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
2199         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
2200         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
2201         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
2202         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2203         [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2204 };
2205
2206 static enum bnxt_media_type
2207 bnxt_get_media(struct bnxt_link_info *link_info)
2208 {
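             /* Use the media type reported by firmware when available;
              * otherwise map the PHY type through the table above.
              */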
2209         switch (link_info->media_type) {
2210         case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
2211                 return BNXT_MEDIA_TP;
2212         case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
2213                 return BNXT_MEDIA_CR;
2214         default:
2215                 if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
2216                         return bnxt_phy_types[link_info->phy_type];
2217                 return BNXT_MEDIA_UNKNOWN;
2218         }
2219 }
2220
2221 enum bnxt_link_speed_indices {
2222         BNXT_LINK_SPEED_UNKNOWN = 0,
2223         BNXT_LINK_SPEED_100MB_IDX,
2224         BNXT_LINK_SPEED_1GB_IDX,
2225         BNXT_LINK_SPEED_10GB_IDX,
2226         BNXT_LINK_SPEED_25GB_IDX,
2227         BNXT_LINK_SPEED_40GB_IDX,
2228         BNXT_LINK_SPEED_50GB_IDX,
2229         BNXT_LINK_SPEED_100GB_IDX,
2230         BNXT_LINK_SPEED_200GB_IDX,
2231         BNXT_LINK_SPEED_400GB_IDX,
2232         __BNXT_LINK_SPEED_END
2233 };
2234
2235 static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
2236 {
2237         switch (speed) {
2238         case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
2239         case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
2240         case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
2241         case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
2242         case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
2243         case BNXT_LINK_SPEED_50GB:
2244         case BNXT_LINK_SPEED_50GB_PAM4:
2245                 return BNXT_LINK_SPEED_50GB_IDX;
2246         case BNXT_LINK_SPEED_100GB:
2247         case BNXT_LINK_SPEED_100GB_PAM4:
2248         case BNXT_LINK_SPEED_100GB_PAM4_112:
2249                 return BNXT_LINK_SPEED_100GB_IDX;
2250         case BNXT_LINK_SPEED_200GB:
2251         case BNXT_LINK_SPEED_200GB_PAM4:
2252         case BNXT_LINK_SPEED_200GB_PAM4_112:
2253                 return BNXT_LINK_SPEED_200GB_IDX;
2254         case BNXT_LINK_SPEED_400GB:
2255         case BNXT_LINK_SPEED_400GB_PAM4:
2256         case BNXT_LINK_SPEED_400GB_PAM4_112:
2257                 return BNXT_LINK_SPEED_400GB_IDX;
2258         default: return BNXT_LINK_SPEED_UNKNOWN;
2259         }
2260 }
2261
2262 static const enum ethtool_link_mode_bit_indices
2263 bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
2264         [BNXT_LINK_SPEED_100MB_IDX] = {
2265                 {
2266                         [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2267                 },
2268         },
2269         [BNXT_LINK_SPEED_1GB_IDX] = {
2270                 {
2271                         [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
2272                         /* historically baseT, but DAC is more correctly baseX */
2273                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
2274                         [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2275                         [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
2276                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2277                 },
2278         },
2279         [BNXT_LINK_SPEED_10GB_IDX] = {
2280                 {
2281                         [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2282                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
2283                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
2284                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
2285                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2286                         [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2287                 },
2288         },
2289         [BNXT_LINK_SPEED_25GB_IDX] = {
2290                 {
2291                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2292                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2293                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2294                 },
2295         },
2296         [BNXT_LINK_SPEED_40GB_IDX] = {
2297                 {
2298                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2299                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2300                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2301                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2302                 },
2303         },
2304         [BNXT_LINK_SPEED_50GB_IDX] = {
2305                 [BNXT_SIG_MODE_NRZ] = {
2306                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2307                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2308                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2309                 },
2310                 [BNXT_SIG_MODE_PAM4] = {
2311                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
2312                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
2313                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
2314                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
2315                 },
2316         },
2317         [BNXT_LINK_SPEED_100GB_IDX] = {
2318                 [BNXT_SIG_MODE_NRZ] = {
2319                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2320                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2321                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2322                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2323                 },
2324                 [BNXT_SIG_MODE_PAM4] = {
2325                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
2326                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
2327                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
2328                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
2329                 },
2330                 [BNXT_SIG_MODE_PAM4_112] = {
2331                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
2332                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
2333                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
2334                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
2335                 },
2336         },
2337         [BNXT_LINK_SPEED_200GB_IDX] = {
2338                 [BNXT_SIG_MODE_PAM4] = {
2339                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
2340                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
2341                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
2342                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
2343                 },
2344                 [BNXT_SIG_MODE_PAM4_112] = {
2345                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
2346                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
2347                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
2348                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
2349                 },
2350         },
2351         [BNXT_LINK_SPEED_400GB_IDX] = {
2352                 [BNXT_SIG_MODE_PAM4] = {
2353                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
2354                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
2355                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
2356                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
2357                 },
2358                 [BNXT_SIG_MODE_PAM4_112] = {
2359                         [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
2360                         [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
2361                         [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
2362                         [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
2363                 },
2364         },
2365 };
2366
2367 #define BNXT_LINK_MODE_UNKNOWN -1
2368
2369 static enum ethtool_link_mode_bit_indices
2370 bnxt_get_link_mode(struct bnxt_link_info *link_info)
2371 {
2372         enum ethtool_link_mode_bit_indices link_mode;
2373         enum bnxt_link_speed_indices speed;
2374         enum bnxt_media_type media;
2375         u8 sig_mode;
2376
2377         if (link_info->phy_link_status != BNXT_LINK_LINK)
2378                 return BNXT_LINK_MODE_UNKNOWN;
2379
2380         media = bnxt_get_media(link_info);
2381         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
2382                 speed = bnxt_fw_speed_idx(link_info->link_speed);
2383                 sig_mode = link_info->active_fec_sig_mode &
2384                         PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
2385         } else {
2386                 speed = bnxt_fw_speed_idx(link_info->req_link_speed);
2387                 sig_mode = link_info->req_signal_mode;
2388         }
2389         if (sig_mode >= BNXT_SIG_MODE_MAX)
2390                 return BNXT_LINK_MODE_UNKNOWN;
2391
2392         /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2393          * link mode, but since no such devices exist, the zeroes in the
2394          * map can be conveniently used to represent unknown link modes.
2395          */
2396         link_mode = bnxt_link_modes[speed][sig_mode][media];
2397         if (!link_mode)
2398                 return BNXT_LINK_MODE_UNKNOWN;
2399
2400         switch (link_mode) {
2401         case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
2402                 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2403                         link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
2404                 break;
2405         case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
2406                 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2407                         link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
2408                 break;
2409         default:
2410                 break;
2411         }
2412
2413         return link_mode;
2414 }
2415
2416 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
2417                                    struct ethtool_link_ksettings *lk_ksettings)
2418 {
2419         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2420
2421         if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
2422                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2423                                  lk_ksettings->link_modes.supported);
2424                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2425                                  lk_ksettings->link_modes.supported);
2426         }
2427
2428         if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
2429             link_info->support_pam4_auto_speeds)
2430                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2431                                  lk_ksettings->link_modes.supported);
2432
2433         if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2434                 return;
2435
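             /* Pause is advertised when RX pause is enabled; Asym_Pause when
              * exactly one of RX/TX pause is enabled.
              */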
2436         if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
2437                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2438                                  lk_ksettings->link_modes.advertising);
2439         if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
2440                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2441                                  lk_ksettings->link_modes.advertising);
2442         if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
2443                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2444                                  lk_ksettings->link_modes.lp_advertising);
2445         if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
2446                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2447                                  lk_ksettings->link_modes.lp_advertising);
2448 }
2449
2450 static const u16 bnxt_nrz_speed_masks[] = {
2451         [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
2452         [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
2453         [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
2454         [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
2455         [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
2456         [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
2457         [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
2458         [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2459 };
2460
2461 static const u16 bnxt_pam4_speed_masks[] = {
2462         [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
2463         [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
2464         [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
2465         [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2466 };
2467
2468 static const u16 bnxt_nrz_speeds2_masks[] = {
2469         [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
2470         [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
2471         [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
2472         [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
2473         [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
2474         [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
2475         [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2476 };
2477
2478 static const u16 bnxt_pam4_speeds2_masks[] = {
2479         [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
2480         [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
2481         [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
2482         [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
2483 };
2484
2485 static const u16 bnxt_pam4_112_speeds2_masks[] = {
2486         [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
2487         [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
2488         [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
2489 };
2490
2491 static enum bnxt_link_speed_indices
2492 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
2493 {
2494         const u16 *speeds;
2495         int idx, len;
2496
2497         switch (sig_mode) {
2498         case BNXT_SIG_MODE_NRZ:
2499                 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2500                         speeds = bnxt_nrz_speeds2_masks;
2501                         len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
2502                 } else {
2503                         speeds = bnxt_nrz_speed_masks;
2504                         len = ARRAY_SIZE(bnxt_nrz_speed_masks);
2505                 }
2506                 break;
2507         case BNXT_SIG_MODE_PAM4:
2508                 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2509                         speeds = bnxt_pam4_speeds2_masks;
2510                         len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
2511                 } else {
2512                         speeds = bnxt_pam4_speed_masks;
2513                         len = ARRAY_SIZE(bnxt_pam4_speed_masks);
2514                 }
2515                 break;
2516         case BNXT_SIG_MODE_PAM4_112:
2517                 speeds = bnxt_pam4_112_speeds2_masks;
2518                 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
2519                 break;
2520         default:
2521                 return BNXT_LINK_SPEED_UNKNOWN;
2522         }
2523
2524         for (idx = 0; idx < len; idx++) {
2525                 if (speeds[idx] == speed_msk)
2526                         return idx;
2527         }
2528
2529         return BNXT_LINK_SPEED_UNKNOWN;
2530 }
2531
2532 #define BNXT_FW_SPEED_MSK_BITS 16
2533
2534 static void
2535 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2536                           u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2537 {
2538         enum ethtool_link_mode_bit_indices link_mode;
2539         enum bnxt_link_speed_indices speed;
2540         u8 bit;
2541
2542         for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
2543                 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
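                     /* index 0 means this bit has no speed mapping for the
                      * encoding; skip it
                      */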
2544                 if (!speed)
2545                         continue;
2546
2547                 link_mode = bnxt_link_modes[speed][sig_mode][media];
2548                 if (!link_mode)
2549                         continue;
2550
2551                 linkmode_set_bit(link_mode, et_mask);
2552         }
2553 }
2554
2555 static void
2556 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2557                         u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2558 {
2559         if (media) {
2560                 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2561                                           et_mask);
2562                 return;
2563         }
2564
2565         /* list speeds for all media if unknown */
2566         for (media = 1; media < __BNXT_MEDIA_END; media++)
2567                 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2568                                           et_mask);
2569 }
2570
2571 static void
2572 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
2573                                     enum bnxt_media_type media,
2574                                     struct ethtool_link_ksettings *lk_ksettings)
2575 {
2576         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2577         u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2578         u16 phy_flags = bp->phy_flags;
2579
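             /* With SPEEDS2, one firmware mask covers NRZ, PAM4 and PAM4-112;
              * the per-mode lookup tables pick out only the bits that belong
              * to each encoding.
              */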
2580         if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2581                 sp_nrz = link_info->support_speeds2;
2582                 sp_pam4 = link_info->support_speeds2;
2583                 sp_pam4_112 = link_info->support_speeds2;
2584         } else {
2585                 sp_nrz = link_info->support_speeds;
2586                 sp_pam4 = link_info->support_pam4_speeds;
2587         }
2588         bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2589                                 lk_ksettings->link_modes.supported);
2590         bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2591                                 lk_ksettings->link_modes.supported);
2592         bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2593                                 phy_flags, lk_ksettings->link_modes.supported);
2594 }
2595
2596 static void
2597 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
2598                                 enum bnxt_media_type media,
2599                                 struct ethtool_link_ksettings *lk_ksettings)
2600 {
2601         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2602         u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2603         u16 phy_flags = bp->phy_flags;
2604
2605         sp_nrz = link_info->advertising;
2606         if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2607                 sp_pam4 = link_info->advertising;
2608                 sp_pam4_112 = link_info->advertising;
2609         } else {
2610                 sp_pam4 = link_info->advertising_pam4;
2611         }
2612         bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2613                                 lk_ksettings->link_modes.advertising);
2614         bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2615                                 lk_ksettings->link_modes.advertising);
2616         bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2617                                 phy_flags, lk_ksettings->link_modes.advertising);
2618 }
2619
2620 static void
2621 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2622                                enum bnxt_media_type media,
2623                                struct ethtool_link_ksettings *lk_ksettings)
2624 {
2625         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2626         u16 phy_flags = bp->phy_flags;
2627
2628         bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2629                                 BNXT_SIG_MODE_NRZ, phy_flags,
2630                                 lk_ksettings->link_modes.lp_advertising);
2631         bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2632                                 BNXT_SIG_MODE_PAM4, phy_flags,
2633                                 lk_ksettings->link_modes.lp_advertising);
2634 }
2635
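/* Apply one ethtool link mode to the firmware speed mask pointed to by
 * @speeds.  Modes for the currently installed media always win; for other
 * media the firmware bit is toggled at most once (tracked via @delta),
 * since several ethtool modes can map onto the same firmware speed bit.
 */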
2636 static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
2637                               u16 speed_msk, const unsigned long *et_mask,
2638                               enum ethtool_link_mode_bit_indices mode)
2639 {
2640         bool mode_desired = linkmode_test_bit(mode, et_mask);
2641
2642         if (!mode)
2643                 return;
2644
2645         /* enabled speeds for installed media should override */
2646         if (installed_media && mode_desired) {
2647                 *speeds |= speed_msk;
2648                 *delta |= speed_msk;
2649                 return;
2650         }
2651
2652         /* many to one mapping, only allow one change per fw_speed bit */
2653         if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
2654                 *speeds ^= speed_msk;
2655                 *delta |= speed_msk;
2656         }
2657 }
2658
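/* Convert a user-supplied ethtool advertising mask back into the firmware
 * advertising masks, iterating over every speed index and media type.
 */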
2659 static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
2660                                     const unsigned long *et_mask)
2661 {
2662         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2663         u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
2664         enum bnxt_media_type media = bnxt_get_media(link_info);
2665         u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
2666         u32 delta_pam4_112 = 0;
2667         u32 delta_pam4 = 0;
2668         u32 delta_nrz = 0;
2669         int i, m;
2670
2671         adv = &link_info->advertising;
2672         if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2673                 adv_pam4 = &link_info->advertising;
2674                 adv_pam4_112 = &link_info->advertising;
2675                 sp_msks = bnxt_nrz_speeds2_masks;
2676                 sp_pam4_msks = bnxt_pam4_speeds2_masks;
2677                 sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
2678         } else {
2679                 adv_pam4 = &link_info->advertising_pam4;
2680                 sp_msks = bnxt_nrz_speed_masks;
2681                 sp_pam4_msks = bnxt_pam4_speed_masks;
2682         }
2683         for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
2684                 /* accept any legal media from user */
2685                 for (m = 1; m < __BNXT_MEDIA_END; m++) {
2686                         bnxt_update_speed(&delta_nrz, m == media,
2687                                           adv, sp_msks[i], et_mask,
2688                                           bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
2689                         bnxt_update_speed(&delta_pam4, m == media,
2690                                           adv_pam4, sp_pam4_msks[i], et_mask,
2691                                           bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
2692                         if (!adv_pam4_112)
2693                                 continue;
2694
2695                         bnxt_update_speed(&delta_pam4_112, m == media,
2696                                           adv_pam4_112, sp_pam4_112_msks[i], et_mask,
2697                                           bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
2698                 }
2699         }
2700 }
2701
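/* Report the FEC modes advertised for autonegotiation.  If FEC is disabled
 * or FEC autoneg is off, only FEC_NONE is advertised.
 */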
2702 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2703                                 struct ethtool_link_ksettings *lk_ksettings)
2704 {
2705         u16 fec_cfg = link_info->fec_cfg;
2706
2707         if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2708                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2709                                  lk_ksettings->link_modes.advertising);
2710                 return;
2711         }
2712         if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2713                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2714                                  lk_ksettings->link_modes.advertising);
2715         if (fec_cfg & BNXT_FEC_ENC_RS)
2716                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2717                                  lk_ksettings->link_modes.advertising);
2718         if (fec_cfg & BNXT_FEC_ENC_LLRS)
2719                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2720                                  lk_ksettings->link_modes.advertising);
2721 }
2722
2723 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2724                                 struct ethtool_link_ksettings *lk_ksettings)
2725 {
2726         u16 fec_cfg = link_info->fec_cfg;
2727
2728         if (fec_cfg & BNXT_FEC_NONE) {
2729                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2730                                  lk_ksettings->link_modes.supported);
2731                 return;
2732         }
2733         if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2734                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2735                                  lk_ksettings->link_modes.supported);
2736         if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2737                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2738                                  lk_ksettings->link_modes.supported);
2739         if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2740                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2741                                  lk_ksettings->link_modes.supported);
2742 }
2743
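/* Map a firmware link speed value to the matching ethtool SPEED_* constant,
 * collapsing the NRZ, PAM4 and PAM4-112 variants of the same speed.
 */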
2744 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2745 {
2746         switch (fw_link_speed) {
2747         case BNXT_LINK_SPEED_100MB:
2748                 return SPEED_100;
2749         case BNXT_LINK_SPEED_1GB:
2750                 return SPEED_1000;
2751         case BNXT_LINK_SPEED_2_5GB:
2752                 return SPEED_2500;
2753         case BNXT_LINK_SPEED_10GB:
2754                 return SPEED_10000;
2755         case BNXT_LINK_SPEED_20GB:
2756                 return SPEED_20000;
2757         case BNXT_LINK_SPEED_25GB:
2758                 return SPEED_25000;
2759         case BNXT_LINK_SPEED_40GB:
2760                 return SPEED_40000;
2761         case BNXT_LINK_SPEED_50GB:
2762         case BNXT_LINK_SPEED_50GB_PAM4:
2763                 return SPEED_50000;
2764         case BNXT_LINK_SPEED_100GB:
2765         case BNXT_LINK_SPEED_100GB_PAM4:
2766         case BNXT_LINK_SPEED_100GB_PAM4_112:
2767                 return SPEED_100000;
2768         case BNXT_LINK_SPEED_200GB:
2769         case BNXT_LINK_SPEED_200GB_PAM4:
2770         case BNXT_LINK_SPEED_200GB_PAM4_112:
2771                 return SPEED_200000;
2772         case BNXT_LINK_SPEED_400GB:
2773         case BNXT_LINK_SPEED_400GB_PAM4:
2774         case BNXT_LINK_SPEED_400GB_PAM4_112:
2775                 return SPEED_400000;
2776         default:
2777                 return SPEED_UNKNOWN;
2778         }
2779 }
2780
2781 static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2782                                     struct bnxt_link_info *link_info)
2783 {
2784         struct ethtool_link_settings *base = &lk_ksettings->base;
2785
2786         if (link_info->link_state == BNXT_LINK_STATE_UP) {
2787                 base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2788                 base->duplex = DUPLEX_HALF;
2789                 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2790                         base->duplex = DUPLEX_FULL;
2791                 lk_ksettings->lanes = link_info->active_lanes;
2792         } else if (!link_info->autoneg) {
2793                 base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2794                 base->duplex = DUPLEX_HALF;
2795                 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2796                         base->duplex = DUPLEX_FULL;
2797         }
2798 }
2799
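/* ethtool .get_link_ksettings handler.  Builds the supported, advertised and
 * link-partner link-mode masks under bp->link_lock and reports the current
 * (or, when forced and link is down, the requested) speed, duplex and port.
 */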
2800 static int bnxt_get_link_ksettings(struct net_device *dev,
2801                                    struct ethtool_link_ksettings *lk_ksettings)
2802 {
2803         struct ethtool_link_settings *base = &lk_ksettings->base;
2804         enum ethtool_link_mode_bit_indices link_mode;
2805         struct bnxt *bp = netdev_priv(dev);
2806         struct bnxt_link_info *link_info;
2807         enum bnxt_media_type media;
2808
2809         ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
2810         ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
2811         ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
2812         base->duplex = DUPLEX_UNKNOWN;
2813         base->speed = SPEED_UNKNOWN;
2814         link_info = &bp->link_info;
2815
2816         mutex_lock(&bp->link_lock);
2817         bnxt_get_ethtool_modes(link_info, lk_ksettings);
2818         media = bnxt_get_media(link_info);
2819         bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
2820         bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
2821         link_mode = bnxt_get_link_mode(link_info);
2822         if (link_mode != BNXT_LINK_MODE_UNKNOWN)
2823                 ethtool_params_from_link_mode(lk_ksettings, link_mode);
2824         else
2825                 bnxt_get_default_speeds(lk_ksettings, link_info);
2826
2827         if (link_info->autoneg) {
2828                 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
2829                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2830                                  lk_ksettings->link_modes.advertising);
2831                 base->autoneg = AUTONEG_ENABLE;
2832                 bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
2833                 if (link_info->phy_link_status == BNXT_LINK_LINK)
2834                         bnxt_get_all_ethtool_lp_speeds(link_info, media,
2835                                                        lk_ksettings);
2836         } else {
2837                 base->autoneg = AUTONEG_DISABLE;
2838         }
2839
2840         base->port = PORT_NONE;
2841         if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
2842                 base->port = PORT_TP;
2843                 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2844                                  lk_ksettings->link_modes.supported);
2845                 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2846                                  lk_ksettings->link_modes.advertising);
2847         } else {
2848                 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2849                                  lk_ksettings->link_modes.supported);
2850                 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2851                                  lk_ksettings->link_modes.advertising);
2852
2853                 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
2854                         base->port = PORT_DA;
2855                 else
2856                         base->port = PORT_FIBRE;
2857         }
2858         base->phy_address = link_info->phy_addr;
2859         mutex_unlock(&bp->link_lock);
2860
2861         return 0;
2862 }
2863
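/* Validate a forced speed (and optional lane count) against the supported
 * speed masks, select the firmware speed and signalling mode, and record the
 * request in link_info.  Returns -EALREADY if the forced setting is already
 * in effect.  This is typically reached via a recent ethtool invocation such
 * as "ethtool -s <dev> speed 50000 lanes 2 autoneg off" (illustrative
 * example, not part of this file).
 */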
2864 static int
2865 bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
2866 {
2867         struct bnxt *bp = netdev_priv(dev);
2868         struct bnxt_link_info *link_info = &bp->link_info;
2869         u16 support_pam4_spds = link_info->support_pam4_speeds;
2870         u16 support_spds2 = link_info->support_speeds2;
2871         u16 support_spds = link_info->support_speeds;
2872         u8 sig_mode = BNXT_SIG_MODE_NRZ;
2873         u32 lanes_needed = 1;
2874         u16 fw_speed = 0;
2875
2876         switch (ethtool_speed) {
2877         case SPEED_100:
2878                 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
2879                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
2880                 break;
2881         case SPEED_1000:
2882                 if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
2883                     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
2884                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
2885                 break;
2886         case SPEED_2500:
2887                 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
2888                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
2889                 break;
2890         case SPEED_10000:
2891                 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
2892                     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
2893                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
2894                 break;
2895         case SPEED_20000:
2896                 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
2897                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
2898                         lanes_needed = 2;
2899                 }
2900                 break;
2901         case SPEED_25000:
2902                 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
2903                     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
2904                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
2905                 break;
2906         case SPEED_40000:
2907                 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
2908                     (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
2909                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
2910                         lanes_needed = 4;
2911                 }
2912                 break;
2913         case SPEED_50000:
2914                 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
2915                      (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
2916                     lanes != 1) {
2917                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
2918                         lanes_needed = 2;
2919                 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
2920                         fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
2921                         sig_mode = BNXT_SIG_MODE_PAM4;
2922                 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
2923                         fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
2924                         sig_mode = BNXT_SIG_MODE_PAM4;
2925                 }
2926                 break;
2927         case SPEED_100000:
2928                 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
2929                      (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
2930                     lanes != 2 && lanes != 1) {
2931                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
2932                         lanes_needed = 4;
2933                 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
2934                         fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
2935                         sig_mode = BNXT_SIG_MODE_PAM4;
2936                         lanes_needed = 2;
2937                 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
2938                            lanes != 1) {
2939                         fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
2940                         sig_mode = BNXT_SIG_MODE_PAM4;
2941                         lanes_needed = 2;
2942                 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
2943                         fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
2944                         sig_mode = BNXT_SIG_MODE_PAM4_112;
2945                 }
2946                 break;
2947         case SPEED_200000:
2948                 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
2949                         fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
2950                         sig_mode = BNXT_SIG_MODE_PAM4;
2951                         lanes_needed = 4;
2952                 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
2953                            lanes != 2) {
2954                         fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
2955                         sig_mode = BNXT_SIG_MODE_PAM4;
2956                         lanes_needed = 4;
2957                 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
2958                         fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
2959                         sig_mode = BNXT_SIG_MODE_PAM4_112;
2960                         lanes_needed = 2;
2961                 }
2962                 break;
2963         case SPEED_400000:
2964                 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
2965                     lanes != 4) {
2966                         fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
2967                         sig_mode = BNXT_SIG_MODE_PAM4;
2968                         lanes_needed = 8;
2969                 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
2970                         fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
2971                         sig_mode = BNXT_SIG_MODE_PAM4_112;
2972                         lanes_needed = 4;
2973                 }
2974                 break;
2975         }
2976
2977         if (!fw_speed) {
2978                 netdev_err(dev, "unsupported speed!\n");
2979                 return -EINVAL;
2980         }
2981
2982         if (lanes && lanes != lanes_needed) {
2983                 netdev_err(dev, "unsupported number of lanes for speed\n");
2984                 return -EINVAL;
2985         }
2986
2987         if (link_info->req_link_speed == fw_speed &&
2988             link_info->req_signal_mode == sig_mode &&
2989             link_info->autoneg == 0)
2990                 return -EALREADY;
2991
2992         link_info->req_link_speed = fw_speed;
2993         link_info->req_signal_mode = sig_mode;
2994         link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
2995         link_info->autoneg = 0;
2996         link_info->advertising = 0;
2997         link_info->advertising_pam4 = 0;
2998
2999         return 0;
3000 }
3001
3002 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
3003 {
3004         u16 fw_speed_mask = 0;
3005
3006         if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
3007             linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
3008                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
3009
3010         if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
3011             linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
3012                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
3013
3014         if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
3015                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
3016
3017         if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
3018                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
3019
3020         return fw_speed_mask;
3021 }
3022
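/* ethtool .set_link_ksettings handler.  With autoneg enabled the requested
 * advertising mask is translated into firmware speed masks (falling back to
 * all supported speeds if none match) and the original pause setting is
 * re-applied; with autoneg disabled, BASE-T/TP media and half duplex are
 * rejected and the speed/lanes are forced via bnxt_force_link_speed().
 */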
3023 static int bnxt_set_link_ksettings(struct net_device *dev,
3024                            const struct ethtool_link_ksettings *lk_ksettings)
3025 {
3026         struct bnxt *bp = netdev_priv(dev);
3027         struct bnxt_link_info *link_info = &bp->link_info;
3028         const struct ethtool_link_settings *base = &lk_ksettings->base;
3029         bool set_pause = false;
3030         u32 speed, lanes = 0;
3031         int rc = 0;
3032
3033         if (!BNXT_PHY_CFG_ABLE(bp))
3034                 return -EOPNOTSUPP;
3035
3036         mutex_lock(&bp->link_lock);
3037         if (base->autoneg == AUTONEG_ENABLE) {
3038                 bnxt_set_ethtool_speeds(link_info,
3039                                         lk_ksettings->link_modes.advertising);
3040                 link_info->autoneg |= BNXT_AUTONEG_SPEED;
3041                 if (!link_info->advertising && !link_info->advertising_pam4) {
3042                         link_info->advertising = link_info->support_auto_speeds;
3043                         link_info->advertising_pam4 =
3044                                 link_info->support_pam4_auto_speeds;
3045                 }
3046                 /* Any change to autoneg will cause a link change, so the driver
3047                  * should restore the original pause setting while autoneg is on.
3048                  */
3049                 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
3050                         set_pause = true;
3051         } else {
3052                 u8 phy_type = link_info->phy_type;
3053
3054                 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
3055                     phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
3056                     link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
3057                         netdev_err(dev, "10GBase-T devices must autoneg\n");
3058                         rc = -EINVAL;
3059                         goto set_setting_exit;
3060                 }
3061                 if (base->duplex == DUPLEX_HALF) {
3062                         netdev_err(dev, "HALF DUPLEX is not supported!\n");
3063                         rc = -EINVAL;
3064                         goto set_setting_exit;
3065                 }
3066                 speed = base->speed;
3067                 lanes = lk_ksettings->lanes;
3068                 rc = bnxt_force_link_speed(dev, speed, lanes);
3069                 if (rc) {
3070                         if (rc == -EALREADY)
3071                                 rc = 0;
3072                         goto set_setting_exit;
3073                 }
3074         }
3075
3076         if (netif_running(dev))
3077                 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
3078
3079 set_setting_exit:
3080         mutex_unlock(&bp->link_lock);
3081         return rc;
3082 }
3083
3084 static int bnxt_get_fecparam(struct net_device *dev,
3085                              struct ethtool_fecparam *fec)
3086 {
3087         struct bnxt *bp = netdev_priv(dev);
3088         struct bnxt_link_info *link_info;
3089         u8 active_fec;
3090         u16 fec_cfg;
3091
3092         link_info = &bp->link_info;
3093         fec_cfg = link_info->fec_cfg;
3094         active_fec = link_info->active_fec_sig_mode &
3095                      PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
3096         if (fec_cfg & BNXT_FEC_NONE) {
3097                 fec->fec = ETHTOOL_FEC_NONE;
3098                 fec->active_fec = ETHTOOL_FEC_NONE;
3099                 return 0;
3100         }
3101         if (fec_cfg & BNXT_FEC_AUTONEG)
3102                 fec->fec |= ETHTOOL_FEC_AUTO;
3103         if (fec_cfg & BNXT_FEC_ENC_BASE_R)
3104                 fec->fec |= ETHTOOL_FEC_BASER;
3105         if (fec_cfg & BNXT_FEC_ENC_RS)
3106                 fec->fec |= ETHTOOL_FEC_RS;
3107         if (fec_cfg & BNXT_FEC_ENC_LLRS)
3108                 fec->fec |= ETHTOOL_FEC_LLRS;
3109
3110         switch (active_fec) {
3111         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
3112                 fec->active_fec |= ETHTOOL_FEC_BASER;
3113                 break;
3114         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
3115         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
3116         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
3117                 fec->active_fec |= ETHTOOL_FEC_RS;
3118                 break;
3119         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
3120         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
3121                 fec->active_fec |= ETHTOOL_FEC_LLRS;
3122                 break;
3123         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
3124                 fec->active_fec |= ETHTOOL_FEC_OFF;
3125                 break;
3126         }
3127         return 0;
3128 }
3129
3130 static void bnxt_get_fec_stats(struct net_device *dev,
3131                                struct ethtool_fec_stats *fec_stats)
3132 {
3133         struct bnxt *bp = netdev_priv(dev);
3134         u64 *rx;
3135
3136         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
3137                 return;
3138
3139         rx = bp->rx_port_stats_ext.sw_stats;
3140         fec_stats->corrected_bits.total =
3141                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
3142
3143         if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
3144                 return;
3145
3146         fec_stats->corrected_blocks.total =
3147                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
3148         fec_stats->uncorrectable_blocks.total =
3149                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
3150 }
3151
3152 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
3153                                          u32 fec)
3154 {
3155         u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
3156
3157         if (fec & ETHTOOL_FEC_BASER)
3158                 fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
3159         else if (fec & ETHTOOL_FEC_RS)
3160                 fw_fec |= BNXT_FEC_RS_ON(link_info);
3161         else if (fec & ETHTOOL_FEC_LLRS)
3162                 fw_fec |= BNXT_FEC_LLRS_ON;
3163         return fw_fec;
3164 }
3165
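/* ethtool --set-fec handler.  Validates the requested FEC modes against the
 * reported capabilities, then issues HWRM_PORT_PHY_CFG with a PHY reset and
 * refreshes the link state on success.  Example usage (illustrative):
 * "ethtool --set-fec <dev> encoding rs".
 */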
3166 static int bnxt_set_fecparam(struct net_device *dev,
3167                              struct ethtool_fecparam *fecparam)
3168 {
3169         struct hwrm_port_phy_cfg_input *req;
3170         struct bnxt *bp = netdev_priv(dev);
3171         struct bnxt_link_info *link_info;
3172         u32 new_cfg, fec = fecparam->fec;
3173         u16 fec_cfg;
3174         int rc;
3175
3176         link_info = &bp->link_info;
3177         fec_cfg = link_info->fec_cfg;
3178         if (fec_cfg & BNXT_FEC_NONE)
3179                 return -EOPNOTSUPP;
3180
3181         if (fec & ETHTOOL_FEC_OFF) {
3182                 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
3183                           BNXT_FEC_ALL_OFF(link_info);
3184                 goto apply_fec;
3185         }
3186         if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
3187             ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
3188             ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
3189             ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
3190                 return -EINVAL;
3191
3192         if (fec & ETHTOOL_FEC_AUTO) {
3193                 if (!link_info->autoneg)
3194                         return -EINVAL;
3195                 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
3196         } else {
3197                 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
3198         }
3199
3200 apply_fec:
3201         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
3202         if (rc)
3203                 return rc;
3204         req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
3205         rc = hwrm_req_send(bp, req);
3206         /* update current settings */
3207         if (!rc) {
3208                 mutex_lock(&bp->link_lock);
3209                 bnxt_update_link(bp, false);
3210                 mutex_unlock(&bp->link_lock);
3211         }
3212         return rc;
3213 }
3214
3215 static void bnxt_get_pauseparam(struct net_device *dev,
3216                                 struct ethtool_pauseparam *epause)
3217 {
3218         struct bnxt *bp = netdev_priv(dev);
3219         struct bnxt_link_info *link_info = &bp->link_info;
3220
3221         if (BNXT_VF(bp))
3222                 return;
3223         epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
3224         epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
3225         epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
3226 }
3227
3228 static void bnxt_get_pause_stats(struct net_device *dev,
3229                                  struct ethtool_pause_stats *epstat)
3230 {
3231         struct bnxt *bp = netdev_priv(dev);
3232         u64 *rx, *tx;
3233
3234         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
3235                 return;
3236
3237         rx = bp->port_stats.sw_stats;
3238         tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3239
3240         epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
3241         epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
3242 }
3243
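/* ethtool -A handler.  With pause autoneg enabled (which requires speed
 * autoneg), the requested rx/tx pause is negotiated; otherwise it is forced,
 * triggering a link change when leaving autonegotiated pause.  Example
 * usage (illustrative): "ethtool -A <dev> autoneg off rx on tx on".
 */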
3244 static int bnxt_set_pauseparam(struct net_device *dev,
3245                                struct ethtool_pauseparam *epause)
3246 {
3247         int rc = 0;
3248         struct bnxt *bp = netdev_priv(dev);
3249         struct bnxt_link_info *link_info = &bp->link_info;
3250
3251         if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
3252                 return -EOPNOTSUPP;
3253
3254         mutex_lock(&bp->link_lock);
3255         if (epause->autoneg) {
3256                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
3257                         rc = -EINVAL;
3258                         goto pause_exit;
3259                 }
3260
3261                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
3262                 link_info->req_flow_ctrl = 0;
3263         } else {
3264                 /* When transitioning from autonegotiated pause to forced pause,
3265                  * force a link change.
3266                  */
3267                 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
3268                         link_info->force_link_chng = true;
3269                 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
3270                 link_info->req_flow_ctrl = 0;
3271         }
3272         if (epause->rx_pause)
3273                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
3274
3275         if (epause->tx_pause)
3276                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
3277
3278         if (netif_running(dev))
3279                 rc = bnxt_hwrm_set_pause(bp);
3280
3281 pause_exit:
3282         mutex_unlock(&bp->link_lock);
3283         return rc;
3284 }
3285
3286 static u32 bnxt_get_link(struct net_device *dev)
3287 {
3288         struct bnxt *bp = netdev_priv(dev);
3289
3290         /* TODO: handle MF, VF, driver close case */
3291         return BNXT_LINK_IS_UP(bp);
3292 }
3293
3294 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
3295                                struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
3296 {
3297         struct hwrm_nvm_get_dev_info_output *resp;
3298         struct hwrm_nvm_get_dev_info_input *req;
3299         int rc;
3300
3301         if (BNXT_VF(bp))
3302                 return -EOPNOTSUPP;
3303
3304         rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
3305         if (rc)
3306                 return rc;
3307
3308         resp = hwrm_req_hold(bp, req);
3309         rc = hwrm_req_send(bp, req);
3310         if (!rc)
3311                 memcpy(nvm_dev_info, resp, sizeof(*resp));
3312         hwrm_req_drop(bp, req);
3313         return rc;
3314 }
3315
3316 static void bnxt_print_admin_err(struct bnxt *bp)
3317 {
3318         netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
3319 }
3320
3321 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
3322                          u16 ext, u16 *index, u32 *item_length,
3323                          u32 *data_length);
3324
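/* Write (or create/resize) an NVRAM directory entry via HWRM_NVM_WRITE.
 * When @data is provided it is copied into a DMA slice of the request;
 * with NULL data and a non-zero @dir_item_len the entry is only
 * (re)allocated to that size.
 */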
3325 int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
3326                      u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
3327                      u32 dir_item_len, const u8 *data,
3328                      size_t data_len)
3329 {
3330         struct bnxt *bp = netdev_priv(dev);
3331         struct hwrm_nvm_write_input *req;
3332         int rc;
3333
3334         rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
3335         if (rc)
3336                 return rc;
3337
3338         if (data_len && data) {
3339                 dma_addr_t dma_handle;
3340                 u8 *kmem;
3341
3342                 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
3343                 if (!kmem) {
3344                         hwrm_req_drop(bp, req);
3345                         return -ENOMEM;
3346                 }
3347
3348                 req->dir_data_length = cpu_to_le32(data_len);
3349
3350                 memcpy(kmem, data, data_len);
3351                 req->host_src_addr = cpu_to_le64(dma_handle);
3352         }
3353
3354         hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
3355         req->dir_type = cpu_to_le16(dir_type);
3356         req->dir_ordinal = cpu_to_le16(dir_ordinal);
3357         req->dir_ext = cpu_to_le16(dir_ext);
3358         req->dir_attr = cpu_to_le16(dir_attr);
3359         req->dir_item_length = cpu_to_le32(dir_item_len);
3360         rc = hwrm_req_send(bp, req);
3361
3362         if (rc == -EACCES)
3363                 bnxt_print_admin_err(bp);
3364         return rc;
3365 }
3366
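/* Issue HWRM_FW_RESET for the given embedded processor.  The request is
 * refused if firmware indicates a reset is not currently permitted (for
 * example when held off by a remote driver); AP resets are sent silently.
 */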
3367 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
3368                              u8 self_reset, u8 flags)
3369 {
3370         struct bnxt *bp = netdev_priv(dev);
3371         struct hwrm_fw_reset_input *req;
3372         int rc;
3373
3374         if (!bnxt_hwrm_reset_permitted(bp)) {
3375                 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver\n");
3376                 return -EPERM;
3377         }
3378
3379         rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
3380         if (rc)
3381                 return rc;
3382
3383         req->embedded_proc_type = proc_type;
3384         req->selfrst_status = self_reset;
3385         req->flags = flags;
3386
3387         if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
3388                 rc = hwrm_req_send_silent(bp, req);
3389         } else {
3390                 rc = hwrm_req_send(bp, req);
3391                 if (rc == -EACCES)
3392                         bnxt_print_admin_err(bp);
3393         }
3394         return rc;
3395 }
3396
3397 static int bnxt_firmware_reset(struct net_device *dev,
3398                                enum bnxt_nvm_directory_type dir_type)
3399 {
3400         u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
3401         u8 proc_type, flags = 0;
3402
3403         /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
3404         /*       (e.g. when firmware isn't already running) */
3405         switch (dir_type) {
3406         case BNX_DIR_TYPE_CHIMP_PATCH:
3407         case BNX_DIR_TYPE_BOOTCODE:
3408         case BNX_DIR_TYPE_BOOTCODE_2:
3409                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
3410                 /* Self-reset ChiMP upon next PCIe reset: */
3411                 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3412                 break;
3413         case BNX_DIR_TYPE_APE_FW:
3414         case BNX_DIR_TYPE_APE_PATCH:
3415                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
3416                 /* Self-reset APE upon next PCIe reset: */
3417                 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3418                 break;
3419         case BNX_DIR_TYPE_KONG_FW:
3420         case BNX_DIR_TYPE_KONG_PATCH:
3421                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
3422                 break;
3423         case BNX_DIR_TYPE_BONO_FW:
3424         case BNX_DIR_TYPE_BONO_PATCH:
3425                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
3426                 break;
3427         default:
3428                 return -EINVAL;
3429         }
3430
3431         return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
3432 }
3433
3434 static int bnxt_firmware_reset_chip(struct net_device *dev)
3435 {
3436         struct bnxt *bp = netdev_priv(dev);
3437         u8 flags = 0;
3438
3439         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
3440                 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
3441
3442         return bnxt_hwrm_firmware_reset(dev,
3443                                         FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
3444                                         FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
3445                                         flags);
3446 }
3447
3448 static int bnxt_firmware_reset_ap(struct net_device *dev)
3449 {
3450         return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
3451                                         FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
3452                                         0);
3453 }
3454
3455 static int bnxt_flash_firmware(struct net_device *dev,
3456                                u16 dir_type,
3457                                const u8 *fw_data,
3458                                size_t fw_size)
3459 {
3460         int     rc = 0;
3461         u16     code_type;
3462         u32     stored_crc;
3463         u32     calculated_crc;
3464         struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
3465
3466         switch (dir_type) {
3467         case BNX_DIR_TYPE_BOOTCODE:
3468         case BNX_DIR_TYPE_BOOTCODE_2:
3469                 code_type = CODE_BOOT;
3470                 break;
3471         case BNX_DIR_TYPE_CHIMP_PATCH:
3472                 code_type = CODE_CHIMP_PATCH;
3473                 break;
3474         case BNX_DIR_TYPE_APE_FW:
3475                 code_type = CODE_MCTP_PASSTHRU;
3476                 break;
3477         case BNX_DIR_TYPE_APE_PATCH:
3478                 code_type = CODE_APE_PATCH;
3479                 break;
3480         case BNX_DIR_TYPE_KONG_FW:
3481                 code_type = CODE_KONG_FW;
3482                 break;
3483         case BNX_DIR_TYPE_KONG_PATCH:
3484                 code_type = CODE_KONG_PATCH;
3485                 break;
3486         case BNX_DIR_TYPE_BONO_FW:
3487                 code_type = CODE_BONO_FW;
3488                 break;
3489         case BNX_DIR_TYPE_BONO_PATCH:
3490                 code_type = CODE_BONO_PATCH;
3491                 break;
3492         default:
3493                 netdev_err(dev, "Unsupported directory entry type: %u\n",
3494                            dir_type);
3495                 return -EINVAL;
3496         }
3497         if (fw_size < sizeof(struct bnxt_fw_header)) {
3498                 netdev_err(dev, "Invalid firmware file size: %u\n",
3499                            (unsigned int)fw_size);
3500                 return -EINVAL;
3501         }
3502         if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
3503                 netdev_err(dev, "Invalid firmware signature: %08X\n",
3504                            le32_to_cpu(header->signature));
3505                 return -EINVAL;
3506         }
3507         if (header->code_type != code_type) {
3508                 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
3509                            code_type, header->code_type);
3510                 return -EINVAL;
3511         }
3512         if (header->device != DEVICE_CUMULUS_FAMILY) {
3513                 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
3514                            DEVICE_CUMULUS_FAMILY, header->device);
3515                 return -EINVAL;
3516         }
3517         /* Confirm the CRC32 checksum of the file: */
3518         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
3519                                              sizeof(stored_crc)));
3520         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
3521         if (calculated_crc != stored_crc) {
3522                 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
3523                            (unsigned long)stored_crc,
3524                            (unsigned long)calculated_crc);
3525                 return -EINVAL;
3526         }
3527         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3528                               0, 0, 0, fw_data, fw_size);
3529         if (rc == 0)    /* Firmware update successful */
3530                 rc = bnxt_firmware_reset(dev, dir_type);
3531
3532         return rc;
3533 }
3534
3535 static int bnxt_flash_microcode(struct net_device *dev,
3536                                 u16 dir_type,
3537                                 const u8 *fw_data,
3538                                 size_t fw_size)
3539 {
3540         struct bnxt_ucode_trailer *trailer;
3541         u32 calculated_crc;
3542         u32 stored_crc;
3543         int rc = 0;
3544
3545         if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
3546                 netdev_err(dev, "Invalid microcode file size: %u\n",
3547                            (unsigned int)fw_size);
3548                 return -EINVAL;
3549         }
3550         trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
3551                                                 sizeof(*trailer)));
3552         if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
3553                 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
3554                            le32_to_cpu(trailer->sig));
3555                 return -EINVAL;
3556         }
3557         if (le16_to_cpu(trailer->dir_type) != dir_type) {
3558                 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
3559                            dir_type, le16_to_cpu(trailer->dir_type));
3560                 return -EINVAL;
3561         }
3562         if (le16_to_cpu(trailer->trailer_length) <
3563                 sizeof(struct bnxt_ucode_trailer)) {
3564                 netdev_err(dev, "Invalid microcode trailer length: %d\n",
3565                            le16_to_cpu(trailer->trailer_length));
3566                 return -EINVAL;
3567         }
3568
3569         /* Confirm the CRC32 checksum of the file: */
3570         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
3571                                              sizeof(stored_crc)));
3572         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
3573         if (calculated_crc != stored_crc) {
3574                 netdev_err(dev,
3575                            "CRC32 (%08lX) does not match calculated: %08lX\n",
3576                            (unsigned long)stored_crc,
3577                            (unsigned long)calculated_crc);
3578                 return -EINVAL;
3579         }
3580         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3581                               0, 0, 0, fw_data, fw_size);
3582
3583         return rc;
3584 }
3585
3586 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
3587 {
3588         switch (dir_type) {
3589         case BNX_DIR_TYPE_CHIMP_PATCH:
3590         case BNX_DIR_TYPE_BOOTCODE:
3591         case BNX_DIR_TYPE_BOOTCODE_2:
3592         case BNX_DIR_TYPE_APE_FW:
3593         case BNX_DIR_TYPE_APE_PATCH:
3594         case BNX_DIR_TYPE_KONG_FW:
3595         case BNX_DIR_TYPE_KONG_PATCH:
3596         case BNX_DIR_TYPE_BONO_FW:
3597         case BNX_DIR_TYPE_BONO_PATCH:
3598                 return true;
3599         }
3600
3601         return false;
3602 }
3603
3604 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
3605 {
3606         switch (dir_type) {
3607         case BNX_DIR_TYPE_AVS:
3608         case BNX_DIR_TYPE_EXP_ROM_MBA:
3609         case BNX_DIR_TYPE_PCIE:
3610         case BNX_DIR_TYPE_TSCF_UCODE:
3611         case BNX_DIR_TYPE_EXT_PHY:
3612         case BNX_DIR_TYPE_CCM:
3613         case BNX_DIR_TYPE_ISCSI_BOOT:
3614         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3615         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3616                 return true;
3617         }
3618
3619         return false;
3620 }
3621
3622 static bool bnxt_dir_type_is_executable(u16 dir_type)
3623 {
3624         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3625                 bnxt_dir_type_is_other_exec_format(dir_type);
3626 }
3627
3628 static int bnxt_flash_firmware_from_file(struct net_device *dev,
3629                                          u16 dir_type,
3630                                          const char *filename)
3631 {
3632         const struct firmware  *fw;
3633         int                     rc;
3634
3635         rc = request_firmware(&fw, filename, &dev->dev);
3636         if (rc != 0) {
3637                 netdev_err(dev, "Error %d requesting firmware file: %s\n",
3638                            rc, filename);
3639                 return rc;
3640         }
3641         if (bnxt_dir_type_is_ape_bin_format(dir_type))
3642                 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
3643         else if (bnxt_dir_type_is_other_exec_format(dir_type))
3644                 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
3645         else
3646                 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3647                                       0, 0, 0, fw->data, fw->size);
3648         release_firmware(fw);
3649         return rc;
3650 }
3651
3652 #define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
3653 #define MSG_INVALID_PKG "PKG install error : Invalid package"
3654 #define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
3655 #define MSG_INVALID_DEV "PKG install error : Invalid device"
3656 #define MSG_INTERNAL_ERR "PKG install error : Internal error"
3657 #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
3658 #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
3659 #define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
3660 #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
3661 #define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3662
3663 static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
3664                                     struct netlink_ext_ack *extack)
3665 {
3666         switch (result) {
3667         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
3668         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
3669         case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
3670         case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
3671         case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
3672         case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
3673                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
3674                 return -EINVAL;
3675         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
3676         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
3677         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
3678         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
3679         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
3680         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
3681         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
3682         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
3683         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
3684         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
3685         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
3686         case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
3687         case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
3688                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
3689                 return -ENOPKG;
3690         case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
3691                 BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
3692                 return -EPERM;
3693         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
3694         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
3695         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
3696         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
3697         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
3698                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
3699                 return -EOPNOTSUPP;
3700         default:
3701                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
3702                 return -EIO;
3703         }
3704 }
3705
3706 #define BNXT_PKG_DMA_SIZE       0x40000
3707 #define BNXT_NVM_MORE_FLAG      (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
3708 #define BNXT_NVM_LAST_FLAG      (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3709
3710 static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
3711                                     struct netlink_ext_ack *extack)
3712 {
3713         u32 item_len;
3714         int rc;
3715
3716         rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3717                                   BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
3718                                   &item_len, NULL);
3719         if (rc) {
3720                 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3721                 return rc;
3722         }
3723
3724         if (fw_size > item_len) {
3725                 rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
3726                                       BNX_DIR_ORDINAL_FIRST, 0, 1,
3727                                       round_up(fw_size, 4096), NULL, 0);
3728                 if (rc) {
3729                         BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
3730                         return rc;
3731                 }
3732         }
3733         return 0;
3734 }
3735
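/* Stream a firmware package into the NVM UPDATE area in DMA-sized chunks
 * using batched HWRM_NVM_MODIFY requests, then trigger
 * HWRM_NVM_INSTALL_UPDATE.  On a fragmentation error the install is retried
 * with defragmentation allowed and, if space is still lacking, the UPDATE
 * entry is recreated once before a final attempt.
 */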
3736 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
3737                                    u32 install_type, struct netlink_ext_ack *extack)
3738 {
3739         struct hwrm_nvm_install_update_input *install;
3740         struct hwrm_nvm_install_update_output *resp;
3741         struct hwrm_nvm_modify_input *modify;
3742         struct bnxt *bp = netdev_priv(dev);
3743         bool defrag_attempted = false;
3744         dma_addr_t dma_handle;
3745         u8 *kmem = NULL;
3746         u32 modify_len;
3747         u32 item_len;
3748         u8 cmd_err;
3749         u16 index;
3750         int rc;
3751
3752         /* Resize the UPDATE entry before flashing an image larger than the available space */
3753         rc = bnxt_resize_update_entry(dev, fw->size, extack);
3754         if (rc)
3755                 return rc;
3756
3757         bnxt_hwrm_fw_set_time(bp);
3758
3759         rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
3760         if (rc)
3761                 return rc;
3762
3763         /* Try allocating a large DMA buffer first.  Older fw will
3764          * cause excessive NVRAM erases when using small blocks.
3765          */
3766         modify_len = roundup_pow_of_two(fw->size);
3767         modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
3768         while (1) {
3769                 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
3770                 if (!kmem && modify_len > PAGE_SIZE)
3771                         modify_len /= 2;
3772                 else
3773                         break;
3774         }
3775         if (!kmem) {
3776                 hwrm_req_drop(bp, modify);
3777                 return -ENOMEM;
3778         }
3779
3780         rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
3781         if (rc) {
3782                 hwrm_req_drop(bp, modify);
3783                 return rc;
3784         }
3785
3786         hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
3787         hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
3788
3789         hwrm_req_hold(bp, modify);
3790         modify->host_src_addr = cpu_to_le64(dma_handle);
3791
3792         resp = hwrm_req_hold(bp, install);
3793         if ((install_type & 0xffff) == 0)
3794                 install_type >>= 16;
3795         install->install_type = cpu_to_le32(install_type);
3796
3797         do {
3798                 u32 copied = 0, len = modify_len;
3799
3800                 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3801                                           BNX_DIR_ORDINAL_FIRST,
3802                                           BNX_DIR_EXT_NONE,
3803                                           &index, &item_len, NULL);
3804                 if (rc) {
3805                         BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3806                         break;
3807                 }
3808                 if (fw->size > item_len) {
3809                         BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
3810                         rc = -EFBIG;
3811                         break;
3812                 }
3813
3814                 modify->dir_idx = cpu_to_le16(index);
3815
3816                 if (fw->size > modify_len)
3817                         modify->flags = BNXT_NVM_MORE_FLAG;
3818                 while (copied < fw->size) {
3819                         u32 balance = fw->size - copied;
3820
3821                         if (balance <= modify_len) {
3822                                 len = balance;
3823                                 if (copied)
3824                                         modify->flags |= BNXT_NVM_LAST_FLAG;
3825                         }
3826                         memcpy(kmem, fw->data + copied, len);
3827                         modify->len = cpu_to_le32(len);
3828                         modify->offset = cpu_to_le32(copied);
3829                         rc = hwrm_req_send(bp, modify);
3830                         if (rc)
3831                                 goto pkg_abort;
3832                         copied += len;
3833                 }
3834
3835                 rc = hwrm_req_send_silent(bp, install);
3836                 if (!rc)
3837                         break;
3838
3839                 if (defrag_attempted) {
3840                         /* We already tried to defragment in the previous
3841                          * iteration; return the INSTALL_UPDATE result as is.
3842                          */
3843                         break;
3844                 }
3845
3846                 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
3847
3848                 switch (cmd_err) {
3849                 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
3850                         BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
3851                         rc = -EALREADY;
3852                         break;
3853                 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
3854                         install->flags =
3855                                 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
3856
3857                         rc = hwrm_req_send_silent(bp, install);
3858                         if (!rc)
3859                                 break;
3860
3861                         cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
3862
3863                         if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
3864                                 /* FW has cleared NVM area, driver will create
3865                                  * UPDATE directory and try the flash again
3866                                  */
3867                                 defrag_attempted = true;
3868                                 install->flags = 0;
3869                                 rc = bnxt_flash_nvram(bp->dev,
3870                                                       BNX_DIR_TYPE_UPDATE,
3871                                                       BNX_DIR_ORDINAL_FIRST,
3872                                                       0, 0, item_len, NULL, 0);
3873                                 if (!rc)
3874                                         break;
3875                         }
3876                         fallthrough;
3877                 default:
3878                         BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
3879                 }
3880         } while (defrag_attempted && !rc);
3881
3882 pkg_abort:
3883         hwrm_req_drop(bp, modify);
3884         hwrm_req_drop(bp, install);
3885
3886         if (resp->result) {
3887                 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
3888                            (s8)resp->result, (int)resp->problem_item);
3889                 rc = nvm_update_err_to_stderr(dev, resp->result, extack);
3890         }
3891         if (rc == -EACCES)
3892                 bnxt_print_admin_err(bp);
3893         return rc;
3894 }
3895
3896 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
3897                                         u32 install_type, struct netlink_ext_ack *extack)
3898 {
3899         const struct firmware *fw;
3900         int rc;
3901
3902         rc = request_firmware(&fw, filename, &dev->dev);
3903         if (rc != 0) {
3904                 netdev_err(dev, "PKG error %d requesting file: %s\n",
3905                            rc, filename);
3906                 return rc;
3907         }
3908
3909         rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
3910
3911         release_firmware(fw);
3912
3913         return rc;
3914 }
3915
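/* ethtool -f handler.  A region of ETHTOOL_FLASH_ALL_REGIONS or any
 * value above 0xffff means the file is a firmware package and the
 * region is used as the install type; otherwise the region selects a
 * single NVM directory type to flash directly.
 */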
3916 static int bnxt_flash_device(struct net_device *dev,
3917                              struct ethtool_flash *flash)
3918 {
3919         if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
3920                 netdev_err(dev, "flashdev not supported from a virtual function\n");
3921                 return -EINVAL;
3922         }
3923
3924         if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
3925             flash->region > 0xffff)
3926                 return bnxt_flash_package_from_file(dev, flash->data,
3927                                                     flash->region, NULL);
3928
3929         return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
3930 }
3931
3932 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
3933 {
3934         struct hwrm_nvm_get_dir_info_output *output;
3935         struct hwrm_nvm_get_dir_info_input *req;
3936         struct bnxt *bp = netdev_priv(dev);
3937         int rc;
3938
3939         rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
3940         if (rc)
3941                 return rc;
3942
3943         output = hwrm_req_hold(bp, req);
3944         rc = hwrm_req_send(bp, req);
3945         if (!rc) {
3946                 *entries = le32_to_cpu(output->entries);
3947                 *length = le32_to_cpu(output->entry_length);
3948         }
3949         hwrm_req_drop(bp, req);
3950         return rc;
3951 }
3952
3953 static int bnxt_get_eeprom_len(struct net_device *dev)
3954 {
3955         struct bnxt *bp = netdev_priv(dev);
3956
3957         if (BNXT_VF(bp))
3958                 return 0;
3959
3960         /* The -1 return value allows the entire 32-bit range of offsets to be
3961          * passed via the ethtool command-line utility.
3962          */
3963         return -1;
3964 }
3965
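/* Fill the caller's buffer with the NVM directory: the first two bytes
 * hold the entry count and per-entry size (each truncated to a byte),
 * followed by the raw directory entries DMA'd from firmware.
 */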
3966 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
3967 {
3968         struct bnxt *bp = netdev_priv(dev);
3969         int rc;
3970         u32 dir_entries;
3971         u32 entry_length;
3972         u8 *buf;
3973         size_t buflen;
3974         dma_addr_t dma_handle;
3975         struct hwrm_nvm_get_dir_entries_input *req;
3976
3977         rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
3978         if (rc != 0)
3979                 return rc;
3980
3981         if (!dir_entries || !entry_length)
3982                 return -EIO;
3983
3984         /* Insert 2 bytes of directory info (count and size of entries) */
3985         if (len < 2)
3986                 return -EINVAL;
3987
3988         *data++ = dir_entries;
3989         *data++ = entry_length;
3990         len -= 2;
3991         memset(data, 0xff, len);
3992
3993         rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
3994         if (rc)
3995                 return rc;
3996
3997         buflen = mul_u32_u32(dir_entries, entry_length);
3998         buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
3999         if (!buf) {
4000                 hwrm_req_drop(bp, req);
4001                 return -ENOMEM;
4002         }
4003         req->host_dest_addr = cpu_to_le64(dma_handle);
4004
4005         hwrm_req_hold(bp, req); /* hold the slice */
4006         rc = hwrm_req_send(bp, req);
4007         if (rc == 0)
4008                 memcpy(data, buf, len > buflen ? buflen : len);
4009         hwrm_req_drop(bp, req);
4010         return rc;
4011 }
4012
4013 int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
4014                         u32 length, u8 *data)
4015 {
4016         struct bnxt *bp = netdev_priv(dev);
4017         int rc;
4018         u8 *buf;
4019         dma_addr_t dma_handle;
4020         struct hwrm_nvm_read_input *req;
4021
4022         if (!length)
4023                 return -EINVAL;
4024
4025         rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
4026         if (rc)
4027                 return rc;
4028
4029         buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
4030         if (!buf) {
4031                 hwrm_req_drop(bp, req);
4032                 return -ENOMEM;
4033         }
4034
4035         req->host_dest_addr = cpu_to_le64(dma_handle);
4036         req->dir_idx = cpu_to_le16(index);
4037         req->offset = cpu_to_le32(offset);
4038         req->len = cpu_to_le32(length);
4039
4040         hwrm_req_hold(bp, req); /* hold the slice */
4041         rc = hwrm_req_send(bp, req);
4042         if (rc == 0)
4043                 memcpy(data, buf, length);
4044         hwrm_req_drop(bp, req);
4045         return rc;
4046 }
4047
4048 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
4049                          u16 ext, u16 *index, u32 *item_length,
4050                          u32 *data_length)
4051 {
4052         struct hwrm_nvm_find_dir_entry_output *output;
4053         struct hwrm_nvm_find_dir_entry_input *req;
4054         struct bnxt *bp = netdev_priv(dev);
4055         int rc;
4056
4057         rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
4058         if (rc)
4059                 return rc;
4060
4061         req->enables = 0;
4062         req->dir_idx = 0;
4063         req->dir_type = cpu_to_le16(type);
4064         req->dir_ordinal = cpu_to_le16(ordinal);
4065         req->dir_ext = cpu_to_le16(ext);
4066         req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
4067         output = hwrm_req_hold(bp, req);
4068         rc = hwrm_req_send_silent(bp, req);
4069         if (rc == 0) {
4070                 if (index)
4071                         *index = le16_to_cpu(output->dir_idx);
4072                 if (item_length)
4073                         *item_length = le32_to_cpu(output->dir_item_length);
4074                 if (data_length)
4075                         *data_length = le32_to_cpu(output->dir_data_length);
4076         }
4077         hwrm_req_drop(bp, req);
4078         return rc;
4079 }
4080
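/* The package log is a series of newline-terminated records with
 * tab-separated fields.  Return a pointer to the requested field of the
 * last record, NUL-terminating fields in place as we go.
 */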
4081 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
4082 {
4083         char    *retval = NULL;
4084         char    *p;
4085         char    *value;
4086         int     field = 0;
4087
4088         if (datalen < 1)
4089                 return NULL;
4090         /* null-terminate the log data (removing last '\n'): */
4091         data[datalen - 1] = 0;
4092         for (p = data; *p != 0; p++) {
4093                 field = 0;
4094                 retval = NULL;
4095                 while (*p != 0 && *p != '\n') {
4096                         value = p;
4097                         while (*p != 0 && *p != '\t' && *p != '\n')
4098                                 p++;
4099                         if (field == desired_field)
4100                                 retval = value;
4101                         if (*p != '\t')
4102                                 break;
4103                         *p = 0;
4104                         field++;
4105                         p++;
4106                 }
4107                 if (*p == 0)
4108                         break;
4109                 *p = 0;
4110         }
4111         return retval;
4112 }
4113
4114 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
4115 {
4116         struct bnxt *bp = netdev_priv(dev);
4117         u16 index = 0;
4118         char *pkgver;
4119         u32 pkglen;
4120         u8 *pkgbuf;
4121         int rc;
4122
4123         rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
4124                                   BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
4125                                   &index, NULL, &pkglen);
4126         if (rc)
4127                 return rc;
4128
4129         pkgbuf = kzalloc(pkglen, GFP_KERNEL);
4130         if (!pkgbuf) {
4131                 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
4132                         pkglen);
4133                 return -ENOMEM;
4134         }
4135
4136         rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
4137         if (rc)
4138                 goto err;
4139
4140         pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
4141                                    pkglen);
4142         if (pkgver && *pkgver != 0 && isdigit(*pkgver))
4143                 strscpy(ver, pkgver, size);
4144         else
4145                 rc = -ENOENT;
4146
4147 err:
4148         kfree(pkgbuf);
4149
4150         return rc;
4151 }
4152
4153 static void bnxt_get_pkgver(struct net_device *dev)
4154 {
4155         struct bnxt *bp = netdev_priv(dev);
4156         char buf[FW_VER_STR_LEN];
4157         int len;
4158
4159         if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
4160                 len = strlen(bp->fw_ver_str);
4161                 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
4162                          "/pkg %s", buf);
4163         }
4164 }
4165
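/* ethtool -e handler.  Offset 0 returns the NVM directory; otherwise
 * bits 31:24 of the offset select a 1-based directory index and bits
 * 23:0 the byte offset within that item.
 */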
4166 static int bnxt_get_eeprom(struct net_device *dev,
4167                            struct ethtool_eeprom *eeprom,
4168                            u8 *data)
4169 {
4170         u32 index;
4171         u32 offset;
4172
4173         if (eeprom->offset == 0) /* special offset value to get directory */
4174                 return bnxt_get_nvram_directory(dev, eeprom->len, data);
4175
4176         index = eeprom->offset >> 24;
4177         offset = eeprom->offset & 0xffffff;
4178
4179         if (index == 0) {
4180                 netdev_err(dev, "unsupported index value: %d\n", index);
4181                 return -EINVAL;
4182         }
4183
4184         return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
4185 }
4186
4187 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
4188 {
4189         struct hwrm_nvm_erase_dir_entry_input *req;
4190         struct bnxt *bp = netdev_priv(dev);
4191         int rc;
4192
4193         rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
4194         if (rc)
4195                 return rc;
4196
4197         req->dir_idx = cpu_to_le16(index);
4198         return hwrm_req_send(bp, req);
4199 }
4200
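/* ethtool -E handler.  Bits 31:16 of the magic select the directory
 * type.  The special type 0xffff requests a directory operation: bits
 * 15:8 are the op (0x0e = erase), bits 7:0 the 1-based index, and the
 * offset must be the bitwise complement of the magic.  Otherwise the
 * low 16 bits of the magic are the extension, and the offset encodes
 * the ordinal (bits 31:16) and attributes (bits 15:0) of the item to
 * flash.
 */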
4201 static int bnxt_set_eeprom(struct net_device *dev,
4202                            struct ethtool_eeprom *eeprom,
4203                            u8 *data)
4204 {
4205         struct bnxt *bp = netdev_priv(dev);
4206         u8 index, dir_op;
4207         u16 type, ext, ordinal, attr;
4208
4209         if (!BNXT_PF(bp)) {
4210                 netdev_err(dev, "NVM write not supported from a virtual function\n");
4211                 return -EINVAL;
4212         }
4213
4214         type = eeprom->magic >> 16;
4215
4216         if (type == 0xffff) { /* special value for directory operations */
4217                 index = eeprom->magic & 0xff;
4218                 dir_op = eeprom->magic >> 8;
4219                 if (index == 0)
4220                         return -EINVAL;
4221                 switch (dir_op) {
4222                 case 0x0e: /* erase */
4223                         if (eeprom->offset != ~eeprom->magic)
4224                                 return -EINVAL;
4225                         return bnxt_erase_nvram_directory(dev, index - 1);
4226                 default:
4227                         return -EINVAL;
4228                 }
4229         }
4230
4231         /* Create or re-write an NVM item: */
4232         if (bnxt_dir_type_is_executable(type))
4233                 return -EOPNOTSUPP;
4234         ext = eeprom->magic & 0xffff;
4235         ordinal = eeprom->offset >> 16;
4236         attr = eeprom->offset & 0xffff;
4237
4238         return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
4239                                 eeprom->len);
4240 }
4241
4242 static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
4243 {
4244         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
4245         __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
4246         struct bnxt *bp = netdev_priv(dev);
4247         struct ethtool_keee *eee = &bp->eee;
4248         struct bnxt_link_info *link_info = &bp->link_info;
4249         int rc = 0;
4250
4251         if (!BNXT_PHY_CFG_ABLE(bp))
4252                 return -EOPNOTSUPP;
4253
4254         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
4255                 return -EOPNOTSUPP;
4256
4257         mutex_lock(&bp->link_lock);
4258         _bnxt_fw_to_linkmode(advertising, link_info->advertising);
4259         if (!edata->eee_enabled)
4260                 goto eee_ok;
4261
4262         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4263                 netdev_warn(dev, "EEE requires autoneg\n");
4264                 rc = -EINVAL;
4265                 goto eee_exit;
4266         }
4267         if (edata->tx_lpi_enabled) {
4268                 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
4269                                        edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
4270                         netdev_warn(dev, "Valid LPI timer range is %d to %d microseconds\n",
4271                                     bp->lpi_tmr_lo, bp->lpi_tmr_hi);
4272                         rc = -EINVAL;
4273                         goto eee_exit;
4274                 } else if (!bp->lpi_tmr_hi) {
4275                         edata->tx_lpi_timer = eee->tx_lpi_timer;
4276                 }
4277         }
4278         if (linkmode_empty(edata->advertised)) {
4279                 linkmode_and(edata->advertised, advertising, eee->supported);
4280         } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
4281                 netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
4282                 rc = -EINVAL;
4283                 goto eee_exit;
4284         }
4285
4286         linkmode_copy(eee->advertised, edata->advertised);
4287         eee->tx_lpi_enabled = edata->tx_lpi_enabled;
4288         eee->tx_lpi_timer = edata->tx_lpi_timer;
4289 eee_ok:
4290         eee->eee_enabled = edata->eee_enabled;
4291
4292         if (netif_running(dev))
4293                 rc = bnxt_hwrm_set_link_setting(bp, false, true);
4294
4295 eee_exit:
4296         mutex_unlock(&bp->link_lock);
4297         return rc;
4298 }
4299
4300 static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
4301 {
4302         struct bnxt *bp = netdev_priv(dev);
4303
4304         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
4305                 return -EOPNOTSUPP;
4306
4307         *edata = bp->eee;
4308         if (!bp->eee.eee_enabled) {
4309                 /* Preserve tx_lpi_timer so that the last value will be used
4310                  * by default when it is re-enabled.
4311                  */
4312                 linkmode_zero(edata->advertised);
4313                 edata->tx_lpi_enabled = 0;
4314         }
4315
4316         if (!bp->eee.eee_active)
4317                 linkmode_zero(edata->lp_advertised);
4318
4319         return 0;
4320 }
4321
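/* Read from the SFP/QSFP module EEPROM over I2C using
 * HWRM_PORT_PHY_I2C_READ, in chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes per firmware request.
 */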
4322 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
4323                                             u16 page_number, u8 bank,
4324                                             u16 start_addr, u16 data_length,
4325                                             u8 *buf)
4326 {
4327         struct hwrm_port_phy_i2c_read_output *output;
4328         struct hwrm_port_phy_i2c_read_input *req;
4329         int rc, byte_offset = 0;
4330
4331         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
4332         if (rc)
4333                 return rc;
4334
4335         output = hwrm_req_hold(bp, req);
4336         req->i2c_slave_addr = i2c_addr;
4337         req->page_number = cpu_to_le16(page_number);
4338         req->port_id = cpu_to_le16(bp->pf.port_id);
4339         do {
4340                 u16 xfer_size;
4341
4342                 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
4343                 data_length -= xfer_size;
4344                 req->page_offset = cpu_to_le16(start_addr + byte_offset);
4345                 req->data_length = xfer_size;
4346                 req->enables =
4347                         cpu_to_le32((start_addr + byte_offset ?
4348                                      PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
4349                                      0) |
4350                                     (bank ?
4351                                      PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
4352                                      0));
4353                 rc = hwrm_req_send(bp, req);
4354                 if (!rc)
4355                         memcpy(buf + byte_offset, output->data, xfer_size);
4356                 byte_offset += xfer_size;
4357         } while (!rc && data_length > 0);
4358         hwrm_req_drop(bp, req);
4359
4360         return rc;
4361 }
4362
4363 static int bnxt_get_module_info(struct net_device *dev,
4364                                 struct ethtool_modinfo *modinfo)
4365 {
4366         u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
4367         struct bnxt *bp = netdev_priv(dev);
4368         int rc;
4369
4370         /* No point in going further if phy status indicates
4371          * module is not inserted or if it is powered down or
4372          * if it is of type 10GBase-T
4373          */
4374         if (bp->link_info.module_status >
4375                 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4376                 return -EOPNOTSUPP;
4377
4378         /* This feature is not supported in older firmware versions */
4379         if (bp->hwrm_spec_code < 0x10202)
4380                 return -EOPNOTSUPP;
4381
4382         rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
4383                                               SFF_DIAG_SUPPORT_OFFSET + 1,
4384                                               data);
4385         if (!rc) {
4386                 u8 module_id = data[0];
4387                 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
4388
4389                 switch (module_id) {
4390                 case SFF_MODULE_ID_SFP:
4391                         modinfo->type = ETH_MODULE_SFF_8472;
4392                         modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4393                         if (!diag_supported)
4394                                 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4395                         break;
4396                 case SFF_MODULE_ID_QSFP:
4397                 case SFF_MODULE_ID_QSFP_PLUS:
4398                         modinfo->type = ETH_MODULE_SFF_8436;
4399                         modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4400                         break;
4401                 case SFF_MODULE_ID_QSFP28:
4402                         modinfo->type = ETH_MODULE_SFF_8636;
4403                         modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
4404                         break;
4405                 default:
4406                         rc = -EOPNOTSUPP;
4407                         break;
4408                 }
4409         }
4410         return rc;
4411 }
4412
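/* ethtool -m handler.  Offsets below ETH_MODULE_SFF_8436_LEN are read
 * from the A0 page of the module EEPROM; the remainder (SFF-8472
 * diagnostics) comes from the A2 page.
 */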
4413 static int bnxt_get_module_eeprom(struct net_device *dev,
4414                                   struct ethtool_eeprom *eeprom,
4415                                   u8 *data)
4416 {
4417         struct bnxt *bp = netdev_priv(dev);
4418         u16  start = eeprom->offset, length = eeprom->len;
4419         int rc = 0;
4420
4421         memset(data, 0, eeprom->len);
4422
4423         /* Read A0 portion of the EEPROM */
4424         if (start < ETH_MODULE_SFF_8436_LEN) {
4425                 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
4426                         length = ETH_MODULE_SFF_8436_LEN - start;
4427                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
4428                                                       start, length, data);
4429                 if (rc)
4430                         return rc;
4431                 start += length;
4432                 data += length;
4433                 length = eeprom->len - length;
4434         }
4435
4436         /* Read A2 portion of the EEPROM */
4437         if (length) {
4438                 start -= ETH_MODULE_SFF_8436_LEN;
4439                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
4440                                                       start, length, data);
4441         }
4442         return rc;
4443 }
4444
4445 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
4446 {
4447         if (bp->link_info.module_status <=
4448             PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4449                 return 0;
4450
4451         switch (bp->link_info.module_status) {
4452         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
4453                 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
4454                 break;
4455         case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
4456                 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
4457                 break;
4458         case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
4459                 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
4460                 break;
4461         default:
4462                 NL_SET_ERR_MSG_MOD(extack, "Unknown error");
4463                 break;
4464         }
4465         return -EINVAL;
4466 }
4467
4468 static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
4469                                           const struct ethtool_module_eeprom *page_data,
4470                                           struct netlink_ext_ack *extack)
4471 {
4472         struct bnxt *bp = netdev_priv(dev);
4473         int rc;
4474
4475         rc = bnxt_get_module_status(bp, extack);
4476         if (rc)
4477                 return rc;
4478
4479         if (bp->hwrm_spec_code < 0x10202) {
4480                 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
4481                 return -EINVAL;
4482         }
4483
4484         if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
4485                 NL_SET_ERR_MSG_MOD(extack, "Firmware does not support bank selection");
4486                 return -EINVAL;
4487         }
4488
4489         rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
4490                                               page_data->page, page_data->bank,
4491                                               page_data->offset,
4492                                               page_data->length,
4493                                               page_data->data);
4494         if (rc) {
4495                 NL_SET_ERR_MSG_MOD(extack, "Module's EEPROM read failed");
4496                 return rc;
4497         }
4498         return page_data->length;
4499 }
4500
4501 static int bnxt_nway_reset(struct net_device *dev)
4502 {
4503         int rc = 0;
4504
4505         struct bnxt *bp = netdev_priv(dev);
4506         struct bnxt_link_info *link_info = &bp->link_info;
4507
4508         if (!BNXT_PHY_CFG_ABLE(bp))
4509                 return -EOPNOTSUPP;
4510
4511         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
4512                 return -EINVAL;
4513
4514         if (netif_running(dev))
4515                 rc = bnxt_hwrm_set_link_setting(bp, true, false);
4516
4517         return rc;
4518 }
4519
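/* ethtool -p handler.  ETHTOOL_ID_ACTIVE blinks every port LED with a
 * 500 ms on/off period; ETHTOOL_ID_INACTIVE restores the default LED
 * state.
 */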
4520 static int bnxt_set_phys_id(struct net_device *dev,
4521                             enum ethtool_phys_id_state state)
4522 {
4523         struct hwrm_port_led_cfg_input *req;
4524         struct bnxt *bp = netdev_priv(dev);
4525         struct bnxt_pf_info *pf = &bp->pf;
4526         struct bnxt_led_cfg *led_cfg;
4527         u8 led_state;
4528         __le16 duration;
4529         int rc, i;
4530
4531         if (!bp->num_leds || BNXT_VF(bp))
4532                 return -EOPNOTSUPP;
4533
4534         if (state == ETHTOOL_ID_ACTIVE) {
4535                 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
4536                 duration = cpu_to_le16(500);
4537         } else if (state == ETHTOOL_ID_INACTIVE) {
4538                 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
4539                 duration = cpu_to_le16(0);
4540         } else {
4541                 return -EINVAL;
4542         }
4543         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
4544         if (rc)
4545                 return rc;
4546
4547         req->port_id = cpu_to_le16(pf->port_id);
4548         req->num_leds = bp->num_leds;
4549         led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
4550         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
4551                 req->enables |= BNXT_LED_DFLT_ENABLES(i);
4552                 led_cfg->led_id = bp->leds[i].led_id;
4553                 led_cfg->led_state = led_state;
4554                 led_cfg->led_blink_on = duration;
4555                 led_cfg->led_blink_off = duration;
4556                 led_cfg->led_group_id = bp->leds[i].led_group_id;
4557         }
4558         return hwrm_req_send(bp, req);
4559 }
4560
4561 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
4562 {
4563         struct hwrm_selftest_irq_input *req;
4564         int rc;
4565
4566         rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
4567         if (rc)
4568                 return rc;
4569
4570         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4571         return hwrm_req_send(bp, req);
4572 }
4573
4574 static int bnxt_test_irq(struct bnxt *bp)
4575 {
4576         int i;
4577
4578         for (i = 0; i < bp->cp_nr_rings; i++) {
4579                 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
4580                 int rc;
4581
4582                 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
4583                 if (rc)
4584                         return rc;
4585         }
4586         return 0;
4587 }
4588
4589 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
4590 {
4591         struct hwrm_port_mac_cfg_input *req;
4592         int rc;
4593
4594         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
4595         if (rc)
4596                 return rc;
4597
4598         req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
4599         if (enable)
4600                 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
4601         else
4602                 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
4603         return hwrm_req_send(bp, req);
4604 }
4605
4606 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
4607 {
4608         struct hwrm_port_phy_qcaps_output *resp;
4609         struct hwrm_port_phy_qcaps_input *req;
4610         int rc;
4611
4612         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
4613         if (rc)
4614                 return rc;
4615
4616         resp = hwrm_req_hold(bp, req);
4617         rc = hwrm_req_send(bp, req);
4618         if (!rc)
4619                 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
4620
4621         hwrm_req_drop(bp, req);
4622         return rc;
4623 }
4624
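/* Some PHYs cannot autonegotiate while in loopback.  If autoneg is on
 * and the PHY lacks that capability, temporarily force a link speed
 * (the current speed if the link is up, otherwise one of the
 * force-capable speeds) before enabling loopback.
 */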
4625 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
4626                                     struct hwrm_port_phy_cfg_input *req)
4627 {
4628         struct bnxt_link_info *link_info = &bp->link_info;
4629         u16 fw_advertising;
4630         u16 fw_speed;
4631         int rc;
4632
4633         if (!link_info->autoneg ||
4634             (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
4635                 return 0;
4636
4637         rc = bnxt_query_force_speeds(bp, &fw_advertising);
4638         if (rc)
4639                 return rc;
4640
4641         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
4642         if (BNXT_LINK_IS_UP(bp))
4643                 fw_speed = bp->link_info.link_speed;
4644         else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
4645                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
4646         else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
4647                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
4648         else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
4649                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
4650         else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
4651                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
4652
4653         req->force_link_speed = cpu_to_le16(fw_speed);
4654         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
4655                                   PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4656         rc = hwrm_req_send(bp, req);
4657         req->flags = 0;
4658         req->force_link_speed = cpu_to_le16(0);
4659         return rc;
4660 }
4661
4662 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
4663 {
4664         struct hwrm_port_phy_cfg_input *req;
4665         int rc;
4666
4667         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
4668         if (rc)
4669                 return rc;
4670
4671         /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
4672         hwrm_req_hold(bp, req);
4673
4674         if (enable) {
4675                 bnxt_disable_an_for_lpbk(bp, req);
4676                 if (ext)
4677                         req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
4678                 else
4679                         req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
4680         } else {
4681                 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
4682         }
4683         req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
4684         rc = hwrm_req_send(bp, req);
4685         hwrm_req_drop(bp, req);
4686         return rc;
4687 }
4688
4689 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4690                             u32 raw_cons, int pkt_size)
4691 {
4692         struct bnxt_napi *bnapi = cpr->bnapi;
4693         struct bnxt_rx_ring_info *rxr;
4694         struct bnxt_sw_rx_bd *rx_buf;
4695         struct rx_cmp *rxcmp;
4696         u16 cp_cons, cons;
4697         u8 *data;
4698         u32 len;
4699         int i;
4700
4701         rxr = bnapi->rx_ring;
4702         cp_cons = RING_CMP(raw_cons);
4703         rxcmp = (struct rx_cmp *)
4704                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
4705         cons = rxcmp->rx_cmp_opaque;
4706         rx_buf = &rxr->rx_buf_ring[cons];
4707         data = rx_buf->data_ptr;
4708         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
4709         if (len != pkt_size)
4710                 return -EIO;
4711         i = ETH_ALEN;
4712         if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
4713                 return -EIO;
4714         i += ETH_ALEN;
4715         for (  ; i < pkt_size; i++) {
4716                 if (data[i] != (u8)(i & 0xff))
4717                         return -EIO;
4718         }
4719         return 0;
4720 }
4721
4722 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4723                               int pkt_size)
4724 {
4725         struct tx_cmp *txcmp;
4726         int rc = -EIO;
4727         u32 raw_cons;
4728         u32 cons;
4729         int i;
4730
4731         raw_cons = cpr->cp_raw_cons;
4732         for (i = 0; i < 200; i++) {
4733                 cons = RING_CMP(raw_cons);
4734                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
4735
4736                 if (!TX_CMP_VALID(txcmp, raw_cons)) {
4737                         udelay(5);
4738                         continue;
4739                 }
4740
4741                 /* The validity test of the entry must be done first before
4742                  * reading any further.
4743                  */
4744                 dma_rmb();
4745                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
4746                     TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
4747                         rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
4748                         raw_cons = NEXT_RAW_CMP(raw_cons);
4749                         raw_cons = NEXT_RAW_CMP(raw_cons);
4750                         break;
4751                 }
4752                 raw_cons = NEXT_RAW_CMP(raw_cons);
4753         }
4754         cpr->cp_raw_cons = raw_cons;
4755         return rc;
4756 }
4757
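/* Transmit one self-addressed test frame with an incrementing byte
 * payload on TX ring 0 and poll the completion ring until the looped
 * back copy is received and verified.
 */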
4758 static int bnxt_run_loopback(struct bnxt *bp)
4759 {
4760         struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
4761         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4762         struct bnxt_cp_ring_info *cpr;
4763         int pkt_size, i = 0;
4764         struct sk_buff *skb;
4765         dma_addr_t map;
4766         u8 *data;
4767         int rc;
4768
4769         cpr = &rxr->bnapi->cp_ring;
4770         if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4771                 cpr = rxr->rx_cpr;
4772         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
4773         skb = netdev_alloc_skb(bp->dev, pkt_size);
4774         if (!skb)
4775                 return -ENOMEM;
4776         data = skb_put(skb, pkt_size);
4777         ether_addr_copy(&data[i], bp->dev->dev_addr);
4778         i += ETH_ALEN;
4779         ether_addr_copy(&data[i], bp->dev->dev_addr);
4780         i += ETH_ALEN;
4781         for ( ; i < pkt_size; i++)
4782                 data[i] = (u8)(i & 0xff);
4783
4784         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
4785                              DMA_TO_DEVICE);
4786         if (dma_mapping_error(&bp->pdev->dev, map)) {
4787                 dev_kfree_skb(skb);
4788                 return -EIO;
4789         }
4790         bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
4791
4792         /* Sync BD data before updating doorbell */
4793         wmb();
4794
4795         bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
4796         rc = bnxt_poll_loopback(bp, cpr, pkt_size);
4797
4798         dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
4799         dev_kfree_skb(skb);
4800         return rc;
4801 }
4802
4803 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
4804 {
4805         struct hwrm_selftest_exec_output *resp;
4806         struct hwrm_selftest_exec_input *req;
4807         int rc;
4808
4809         rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
4810         if (rc)
4811                 return rc;
4812
4813         hwrm_req_timeout(bp, req, bp->test_info->timeout);
4814         req->flags = test_mask;
4815
4816         resp = hwrm_req_hold(bp, req);
4817         rc = hwrm_req_send(bp, req);
4818         *test_results = resp->test_success;
4819         hwrm_req_drop(bp, req);
4820         return rc;
4821 }
4822
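/* The self-test array exposed to ethtool consists of the firmware
 * tests reported by HWRM_SELFTEST_QLIST followed by four driver
 * implemented tests: MAC loopback, PHY loopback, external loopback and
 * IRQ, at the indexes defined below.
 */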
4823 #define BNXT_DRV_TESTS                  4
4824 #define BNXT_MACLPBK_TEST_IDX           (bp->num_tests - BNXT_DRV_TESTS)
4825 #define BNXT_PHYLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 1)
4826 #define BNXT_EXTLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 2)
4827 #define BNXT_IRQ_TEST_IDX               (BNXT_MACLPBK_TEST_IDX + 3)
4828
4829 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
4830                            u64 *buf)
4831 {
4832         struct bnxt *bp = netdev_priv(dev);
4833         bool do_ext_lpbk = false;
4834         bool offline = false;
4835         u8 test_results = 0;
4836         u8 test_mask = 0;
4837         int rc = 0, i;
4838
4839         if (!bp->num_tests || !BNXT_PF(bp))
4840                 return;
4841
4842         if (etest->flags & ETH_TEST_FL_OFFLINE &&
4843             bnxt_ulp_registered(bp->edev)) {
4844                 etest->flags |= ETH_TEST_FL_FAILED;
4845                 netdev_warn(dev, "Offline tests cannot be run with the RoCE driver loaded\n");
4846                 return;
4847         }
4848
4849         memset(buf, 0, sizeof(u64) * bp->num_tests);
4850         if (!netif_running(dev)) {
4851                 etest->flags |= ETH_TEST_FL_FAILED;
4852                 return;
4853         }
4854
4855         if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
4856             (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
4857                 do_ext_lpbk = true;
4858
4859         if (etest->flags & ETH_TEST_FL_OFFLINE) {
4860                 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
4861                         etest->flags |= ETH_TEST_FL_FAILED;
4862                         netdev_warn(dev, "Offline tests cannot be run with active VFs or on a shared PF\n");
4863                         return;
4864                 }
4865                 offline = true;
4866         }
4867
4868         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4869                 u8 bit_val = 1 << i;
4870
4871                 if (!(bp->test_info->offline_mask & bit_val))
4872                         test_mask |= bit_val;
4873                 else if (offline)
4874                         test_mask |= bit_val;
4875         }
4876         if (!offline) {
4877                 bnxt_run_fw_tests(bp, test_mask, &test_results);
4878         } else {
4879                 bnxt_close_nic(bp, true, false);
4880                 bnxt_run_fw_tests(bp, test_mask, &test_results);
4881
4882                 buf[BNXT_MACLPBK_TEST_IDX] = 1;
4883                 bnxt_hwrm_mac_loopback(bp, true);
4884                 msleep(250);
4885                 rc = bnxt_half_open_nic(bp);
4886                 if (rc) {
4887                         bnxt_hwrm_mac_loopback(bp, false);
4888                         etest->flags |= ETH_TEST_FL_FAILED;
4889                         return;
4890                 }
4891                 if (bnxt_run_loopback(bp))
4892                         etest->flags |= ETH_TEST_FL_FAILED;
4893                 else
4894                         buf[BNXT_MACLPBK_TEST_IDX] = 0;
4895
4896                 bnxt_hwrm_mac_loopback(bp, false);
4897                 bnxt_hwrm_phy_loopback(bp, true, false);
4898                 msleep(1000);
4899                 if (bnxt_run_loopback(bp)) {
4900                         buf[BNXT_PHYLPBK_TEST_IDX] = 1;
4901                         etest->flags |= ETH_TEST_FL_FAILED;
4902                 }
4903                 if (do_ext_lpbk) {
4904                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
4905                         bnxt_hwrm_phy_loopback(bp, true, true);
4906                         msleep(1000);
4907                         if (bnxt_run_loopback(bp)) {
4908                                 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
4909                                 etest->flags |= ETH_TEST_FL_FAILED;
4910                         }
4911                 }
4912                 bnxt_hwrm_phy_loopback(bp, false, false);
4913                 bnxt_half_close_nic(bp);
4914                 rc = bnxt_open_nic(bp, true, true);
4915         }
4916         if (rc || bnxt_test_irq(bp)) {
4917                 buf[BNXT_IRQ_TEST_IDX] = 1;
4918                 etest->flags |= ETH_TEST_FL_FAILED;
4919         }
4920         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4921                 u8 bit_val = 1 << i;
4922
4923                 if ((test_mask & bit_val) && !(test_results & bit_val)) {
4924                         buf[i] = 1;
4925                         etest->flags |= ETH_TEST_FL_FAILED;
4926                 }
4927         }
4928 }
4929
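/* ethtool --reset handler.  Chip and AP resets both require HWRM spec
 * 0x10803 or newer; a chip reset on firmware without hot-reset support
 * and any AP reset need a driver reload to take full effect.  AP reset
 * is only attempted on chips older than the P4 generation.
 */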
4930 static int bnxt_reset(struct net_device *dev, u32 *flags)
4931 {
4932         struct bnxt *bp = netdev_priv(dev);
4933         bool reload = false;
4934         u32 req = *flags;
4935
4936         if (!req)
4937                 return -EINVAL;
4938
4939         if (!BNXT_PF(bp)) {
4940                 netdev_err(dev, "Reset is not supported from a VF\n");
4941                 return -EOPNOTSUPP;
4942         }
4943
4944         if (pci_vfs_assigned(bp->pdev) &&
4945             !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
4946                 netdev_err(dev,
4947                            "Reset not allowed when VFs are assigned to VMs\n");
4948                 return -EBUSY;
4949         }
4950
4951         if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
4952                 /* This feature is not supported in older firmware versions */
4953                 if (bp->hwrm_spec_code >= 0x10803) {
4954                         if (!bnxt_firmware_reset_chip(dev)) {
4955                                 netdev_info(dev, "Firmware reset request successful.\n");
4956                                 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
4957                                         reload = true;
4958                                 *flags &= ~BNXT_FW_RESET_CHIP;
4959                         }
4960                 } else if (req == BNXT_FW_RESET_CHIP) {
4961                         return -EOPNOTSUPP; /* only request, fail hard */
4962                 }
4963         }
4964
4965         if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
4966                 /* This feature is not supported in older firmware versions */
4967                 if (bp->hwrm_spec_code >= 0x10803) {
4968                         if (!bnxt_firmware_reset_ap(dev)) {
4969                                 netdev_info(dev, "Reset application processor successful.\n");
4970                                 reload = true;
4971                                 *flags &= ~BNXT_FW_RESET_AP;
4972                         }
4973                 } else if (req == BNXT_FW_RESET_AP) {
4974                         return -EOPNOTSUPP; /* only request, fail hard */
4975                 }
4976         }
4977
4978         if (reload)
4979                 netdev_info(dev, "Reload driver to complete reset\n");
4980
4981         return 0;
4982 }
4983
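/* ethtool -W handler: select between a live coredump (0) and a crash
 * dump (1).  Crash dump availability depends on the firmware's
 * crash-dump capabilities and, for SoC-DDR based collection, on
 * CONFIG_TEE_BNXT_FW being enabled.
 */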
4984 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
4985 {
4986         struct bnxt *bp = netdev_priv(dev);
4987
4988         if (dump->flag > BNXT_DUMP_CRASH) {
4989                 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
4990                 return -EINVAL;
4991         }
4992
4993         if (dump->flag == BNXT_DUMP_CRASH) {
4994                 if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
4995                     (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
4996                         netdev_info(dev,
4997                                     "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
4998                         return -EOPNOTSUPP;
4999                 } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
5000                         netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
5001                         return -EOPNOTSUPP;
5002                 }
5003         }
5004
5005         bp->dump_flag = dump->flag;
5006         return 0;
5007 }
5008
5009 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
5010 {
5011         struct bnxt *bp = netdev_priv(dev);
5012
5013         if (bp->hwrm_spec_code < 0x10801)
5014                 return -EOPNOTSUPP;
5015
5016         dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
5017                         bp->ver_resp.hwrm_fw_min_8b << 16 |
5018                         bp->ver_resp.hwrm_fw_bld_8b << 8 |
5019                         bp->ver_resp.hwrm_fw_rsvd_8b;
5020
5021         dump->flag = bp->dump_flag;
5022         dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
5023         return 0;
5024 }
5025
5026 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
5027                               void *buf)
5028 {
5029         struct bnxt *bp = netdev_priv(dev);
5030
5031         if (bp->hwrm_spec_code < 0x10801)
5032                 return -EOPNOTSUPP;
5033
5034         memset(buf, 0, dump->len);
5035
5036         dump->flag = bp->dump_flag;
5037         return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
5038 }
5039
5040 static int bnxt_get_ts_info(struct net_device *dev,
5041                             struct kernel_ethtool_ts_info *info)
5042 {
5043         struct bnxt *bp = netdev_priv(dev);
5044         struct bnxt_ptp_cfg *ptp;
5045
5046         ptp = bp->ptp_cfg;
5047         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
5048
5049         if (!ptp)
5050                 return 0;
5051
5052         info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
5053                                  SOF_TIMESTAMPING_RX_HARDWARE |
5054                                  SOF_TIMESTAMPING_RAW_HARDWARE;
5055         if (ptp->ptp_clock)
5056                 info->phc_index = ptp_clock_index(ptp->ptp_clock);
5057
5058         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5059
5060         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5061                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5062                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5063
5064         if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
5065                 info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
5066         return 0;
5067 }
5068
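/* Query HWRM_SELFTEST_QLIST for the firmware self-tests and their
 * timeout, and build the test name strings (firmware tests plus the
 * driver loopback/IRQ tests).  Also appends the NVM package version to
 * the firmware version string when firmware does not report it.
 */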
5069 void bnxt_ethtool_init(struct bnxt *bp)
5070 {
5071         struct hwrm_selftest_qlist_output *resp;
5072         struct hwrm_selftest_qlist_input *req;
5073         struct bnxt_test_info *test_info;
5074         struct net_device *dev = bp->dev;
5075         int i, rc;
5076
5077         if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
5078                 bnxt_get_pkgver(dev);
5079
5080         bp->num_tests = 0;
5081         if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
5082                 return;
5083
5084         test_info = bp->test_info;
5085         if (!test_info) {
5086                 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
5087                 if (!test_info)
5088                         return;
5089                 bp->test_info = test_info;
5090         }
5091
5092         if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
5093                 return;
5094
5095         resp = hwrm_req_hold(bp, req);
5096         rc = hwrm_req_send_silent(bp, req);
5097         if (rc)
5098                 goto ethtool_init_exit;
5099
5100         bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
5101         if (bp->num_tests > BNXT_MAX_TEST)
5102                 bp->num_tests = BNXT_MAX_TEST;
5103
5104         test_info->offline_mask = resp->offline_tests;
5105         test_info->timeout = le16_to_cpu(resp->test_timeout);
5106         if (!test_info->timeout)
5107                 test_info->timeout = HWRM_CMD_TIMEOUT;
5108         for (i = 0; i < bp->num_tests; i++) {
5109                 char *str = test_info->string[i];
5110                 char *fw_str = resp->test_name[i];
5111
5112                 if (i == BNXT_MACLPBK_TEST_IDX) {
5113                         strcpy(str, "Mac loopback test (offline)");
5114                 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
5115                         strcpy(str, "Phy loopback test (offline)");
5116                 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
5117                         strcpy(str, "Ext loopback test (offline)");
5118                 } else if (i == BNXT_IRQ_TEST_IDX) {
5119                         strcpy(str, "Interrupt test (offline)");
5120                 } else {
5121                         snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
5122                                  fw_str, test_info->offline_mask & (1 << i) ?
5123                                         "offline" : "online");
5124                 }
5125         }
5126
5127 ethtool_init_exit:
5128         hwrm_req_drop(bp, req);
5129 }
5130
5131 static void bnxt_get_eth_phy_stats(struct net_device *dev,
5132                                    struct ethtool_eth_phy_stats *phy_stats)
5133 {
5134         struct bnxt *bp = netdev_priv(dev);
5135         u64 *rx;
5136
5137         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
5138                 return;
5139
5140         rx = bp->rx_port_stats_ext.sw_stats;
5141         phy_stats->SymbolErrorDuringCarrier =
5142                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
5143 }
5144
5145 static void bnxt_get_eth_mac_stats(struct net_device *dev,
5146                                    struct ethtool_eth_mac_stats *mac_stats)
5147 {
5148         struct bnxt *bp = netdev_priv(dev);
5149         u64 *rx, *tx;
5150
5151         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
5152                 return;
5153
5154         rx = bp->port_stats.sw_stats;
5155         tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5156
5157         mac_stats->FramesReceivedOK =
5158                 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
5159         mac_stats->FramesTransmittedOK =
5160                 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
5161         mac_stats->FrameCheckSequenceErrors =
5162                 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
5163         mac_stats->AlignmentErrors =
5164                 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
5165         mac_stats->OutOfRangeLengthField =
5166                 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
5167 }
5168
5169 static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
5170                                     struct ethtool_eth_ctrl_stats *ctrl_stats)
5171 {
5172         struct bnxt *bp = netdev_priv(dev);
5173         u64 *rx;
5174
5175         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
5176                 return;
5177
5178         rx = bp->port_stats.sw_stats;
5179         ctrl_stats->MACControlFramesReceived =
5180                 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
5181 }
5182
5183 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
5184         {    0,    64 },
5185         {   65,   127 },
5186         {  128,   255 },
5187         {  256,   511 },
5188         {  512,  1023 },
5189         { 1024,  1518 },
5190         { 1519,  2047 },
5191         { 2048,  4095 },
5192         { 4096,  9216 },
5193         { 9217, 16383 },
5194         {}
5195 };
5196
5197 static void bnxt_get_rmon_stats(struct net_device *dev,
5198                                 struct ethtool_rmon_stats *rmon_stats,
5199                                 const struct ethtool_rmon_hist_range **ranges)
5200 {
5201         struct bnxt *bp = netdev_priv(dev);
5202         u64 *rx, *tx;
5203
5204         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
5205                 return;
5206
5207         rx = bp->port_stats.sw_stats;
5208         tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
5209
5210         rmon_stats->jabbers =
5211                 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
5212         rmon_stats->oversize_pkts =
5213                 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
5214         rmon_stats->undersize_pkts =
5215                 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
5216
5217         rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
5218         rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
5219         rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
5220         rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
5221         rmon_stats->hist[4] =
5222                 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
5223         rmon_stats->hist[5] =
5224                 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
5225         rmon_stats->hist[6] =
5226                 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
5227         rmon_stats->hist[7] =
5228                 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
5229         rmon_stats->hist[8] =
5230                 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
5231         rmon_stats->hist[9] =
5232                 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
5233
5234         rmon_stats->hist_tx[0] =
5235                 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
5236         rmon_stats->hist_tx[1] =
5237                 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
5238         rmon_stats->hist_tx[2] =
5239                 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
5240         rmon_stats->hist_tx[3] =
5241                 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
5242         rmon_stats->hist_tx[4] =
5243                 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
5244         rmon_stats->hist_tx[5] =
5245                 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
5246         rmon_stats->hist_tx[6] =
5247                 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
5248         rmon_stats->hist_tx[7] =
5249                 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
5250         rmon_stats->hist_tx[8] =
5251                 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
5252         rmon_stats->hist_tx[9] =
5253                 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
5254
5255         *ranges = bnxt_rmon_ranges;
5256 }
5257
5258 static void bnxt_get_ptp_stats(struct net_device *dev,
5259                                struct ethtool_ts_stats *ts_stats)
5260 {
5261         struct bnxt *bp = netdev_priv(dev);
5262         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5263
5264         if (ptp) {
5265                 ts_stats->pkts = ptp->stats.ts_pkts;
5266                 ts_stats->lost = ptp->stats.ts_lost;
5267                 ts_stats->err = atomic64_read(&ptp->stats.ts_err);
5268         }
5269 }
5270
5271 static void bnxt_get_link_ext_stats(struct net_device *dev,
5272                                     struct ethtool_link_ext_stats *stats)
5273 {
5274         struct bnxt *bp = netdev_priv(dev);
5275         u64 *rx;
5276
5277         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
5278                 return;
5279
5280         rx = bp->rx_port_stats_ext.sw_stats;
5281         stats->link_down_events =
5282                 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
5283 }
5284
5285 void bnxt_ethtool_free(struct bnxt *bp)
5286 {
5287         kfree(bp->test_info);
5288         bp->test_info = NULL;
5289 }
5290
const struct ethtool_ops bnxt_ethtool_ops = {
        .cap_link_lanes_supported       = 1,
        .rxfh_per_ctx_key               = 1,
        .rxfh_max_num_contexts          = BNXT_MAX_ETH_RSS_CTX + 1,
        .rxfh_indir_space               = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
        .rxfh_priv_size                 = sizeof(struct bnxt_rss_ctx),
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USECS_IRQ |
                                     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
                                     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
                                     ETHTOOL_COALESCE_USE_CQE,
        .get_link_ksettings     = bnxt_get_link_ksettings,
        .set_link_ksettings     = bnxt_set_link_ksettings,
        .get_fec_stats          = bnxt_get_fec_stats,
        .get_fecparam           = bnxt_get_fecparam,
        .set_fecparam           = bnxt_set_fecparam,
        .get_pause_stats        = bnxt_get_pause_stats,
        .get_pauseparam         = bnxt_get_pauseparam,
        .set_pauseparam         = bnxt_set_pauseparam,
        .get_drvinfo            = bnxt_get_drvinfo,
        .get_regs_len           = bnxt_get_regs_len,
        .get_regs               = bnxt_get_regs,
        .get_wol                = bnxt_get_wol,
        .set_wol                = bnxt_set_wol,
        .get_coalesce           = bnxt_get_coalesce,
        .set_coalesce           = bnxt_set_coalesce,
        .get_msglevel           = bnxt_get_msglevel,
        .set_msglevel           = bnxt_set_msglevel,
        .get_sset_count         = bnxt_get_sset_count,
        .get_strings            = bnxt_get_strings,
        .get_ethtool_stats      = bnxt_get_ethtool_stats,
        .set_ringparam          = bnxt_set_ringparam,
        .get_ringparam          = bnxt_get_ringparam,
        .get_channels           = bnxt_get_channels,
        .set_channels           = bnxt_set_channels,
        .get_rxnfc              = bnxt_get_rxnfc,
        .set_rxnfc              = bnxt_set_rxnfc,
        .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
        .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
        .get_rxfh               = bnxt_get_rxfh,
        .set_rxfh               = bnxt_set_rxfh,
        .create_rxfh_context    = bnxt_create_rxfh_context,
        .modify_rxfh_context    = bnxt_modify_rxfh_context,
        .remove_rxfh_context    = bnxt_remove_rxfh_context,
        .flash_device           = bnxt_flash_device,
        .get_eeprom_len         = bnxt_get_eeprom_len,
        .get_eeprom             = bnxt_get_eeprom,
        .set_eeprom             = bnxt_set_eeprom,
        .get_link               = bnxt_get_link,
        .get_link_ext_stats     = bnxt_get_link_ext_stats,
        .get_eee                = bnxt_get_eee,
        .set_eee                = bnxt_set_eee,
        .get_module_info        = bnxt_get_module_info,
        .get_module_eeprom      = bnxt_get_module_eeprom,
        .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
        .nway_reset             = bnxt_nway_reset,
        .set_phys_id            = bnxt_set_phys_id,
        .self_test              = bnxt_self_test,
        .get_ts_info            = bnxt_get_ts_info,
        .reset                  = bnxt_reset,
        .set_dump               = bnxt_set_dump,
        .get_dump_flag          = bnxt_get_dump_flag,
        .get_dump_data          = bnxt_get_dump_data,
        .get_eth_phy_stats      = bnxt_get_eth_phy_stats,
        .get_eth_mac_stats      = bnxt_get_eth_mac_stats,
        .get_eth_ctrl_stats     = bnxt_get_eth_ctrl_stats,
        .get_rmon_stats         = bnxt_get_rmon_stats,
        .get_ts_stats           = bnxt_get_ptp_stats,
};