drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1/* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2017 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11#include <linux/ctype.h>
12#include <linux/stringify.h>
13#include <linux/ethtool.h>
14#include <linux/interrupt.h>
15#include <linux/pci.h>
16#include <linux/etherdevice.h>
17#include <linux/crc32.h>
18#include <linux/firmware.h>
19#include <linux/utsname.h>
20#include <linux/time.h>
21#include "bnxt_hsi.h"
22#include "bnxt.h"
23#include "bnxt_xdp.h"
24#include "bnxt_ethtool.h"
25#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
26#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
27#include "bnxt_coredump.h"
28#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
29#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
30#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
31
32static u32 bnxt_get_msglevel(struct net_device *dev)
33{
34 struct bnxt *bp = netdev_priv(dev);
35
36 return bp->msg_enable;
37}
38
39static void bnxt_set_msglevel(struct net_device *dev, u32 value)
40{
41 struct bnxt *bp = netdev_priv(dev);
42
43 bp->msg_enable = value;
44}
45
46static int bnxt_get_coalesce(struct net_device *dev,
47 struct ethtool_coalesce *coal)
48{
49 struct bnxt *bp = netdev_priv(dev);
50 struct bnxt_coal *hw_coal;
51 u16 mult;
52
53 memset(coal, 0, sizeof(*coal));
54
55 coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
56
57 hw_coal = &bp->rx_coal;
58 mult = hw_coal->bufs_per_record;
59 coal->rx_coalesce_usecs = hw_coal->coal_ticks;
60 coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
61 coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
62 coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
63
64 hw_coal = &bp->tx_coal;
65 mult = hw_coal->bufs_per_record;
66 coal->tx_coalesce_usecs = hw_coal->coal_ticks;
67 coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
68 coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
69 coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
70
71 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
72
73 return 0;
74}
75
76static int bnxt_set_coalesce(struct net_device *dev,
77 struct ethtool_coalesce *coal)
78{
79 struct bnxt *bp = netdev_priv(dev);
80 bool update_stats = false;
81 struct bnxt_coal *hw_coal;
82 int rc = 0;
83 u16 mult;
84
85 if (coal->use_adaptive_rx_coalesce) {
86 bp->flags |= BNXT_FLAG_DIM;
87 } else {
88 if (bp->flags & BNXT_FLAG_DIM) {
89 bp->flags &= ~(BNXT_FLAG_DIM);
90 goto reset_coalesce;
91 }
92 }
93
94 hw_coal = &bp->rx_coal;
95 mult = hw_coal->bufs_per_record;
96 hw_coal->coal_ticks = coal->rx_coalesce_usecs;
97 hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
98 hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
99 hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
100
101 hw_coal = &bp->tx_coal;
102 mult = hw_coal->bufs_per_record;
103 hw_coal->coal_ticks = coal->tx_coalesce_usecs;
104 hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
105 hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
106 hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
107
108 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
109 u32 stats_ticks = coal->stats_block_coalesce_usecs;
110
111 /* Allow 0, which means disable. */
112 if (stats_ticks)
113 stats_ticks = clamp_t(u32, stats_ticks,
114 BNXT_MIN_STATS_COAL_TICKS,
115 BNXT_MAX_STATS_COAL_TICKS);
116 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
117 bp->stats_coal_ticks = stats_ticks;
118 if (bp->stats_coal_ticks)
119 bp->current_interval =
120 bp->stats_coal_ticks * HZ / 1000000;
121 else
122 bp->current_interval = BNXT_TIMER_INTERVAL;
123 update_stats = true;
124 }
125
126reset_coalesce:
127 if (netif_running(dev)) {
128 if (update_stats) {
129 rc = bnxt_close_nic(bp, true, false);
130 if (!rc)
131 rc = bnxt_open_nic(bp, true, false);
132 } else {
133 rc = bnxt_hwrm_set_coal(bp);
134 }
135 }
136
137 return rc;
138}
139
140static const char * const bnxt_ring_rx_stats_str[] = {
141 "rx_ucast_packets",
142 "rx_mcast_packets",
143 "rx_bcast_packets",
144 "rx_discards",
145 "rx_errors",
146 "rx_ucast_bytes",
147 "rx_mcast_bytes",
148 "rx_bcast_bytes",
149};
150
151static const char * const bnxt_ring_tx_stats_str[] = {
152 "tx_ucast_packets",
153 "tx_mcast_packets",
154 "tx_bcast_packets",
155 "tx_errors",
156 "tx_discards",
157 "tx_ucast_bytes",
158 "tx_mcast_bytes",
159 "tx_bcast_bytes",
160};
161
162static const char * const bnxt_ring_tpa_stats_str[] = {
163 "tpa_packets",
164 "tpa_bytes",
165 "tpa_events",
166 "tpa_aborts",
167};
168
169static const char * const bnxt_ring_tpa2_stats_str[] = {
170 "rx_tpa_eligible_pkt",
171 "rx_tpa_eligible_bytes",
172 "rx_tpa_pkt",
173 "rx_tpa_bytes",
174 "rx_tpa_errors",
175};
176
177static const char * const bnxt_rx_sw_stats_str[] = {
178 "rx_l4_csum_errors",
179 "rx_buf_errors",
180};
181
182static const char * const bnxt_cmn_sw_stats_str[] = {
183 "missed_irqs",
184};
185
186#define BNXT_RX_STATS_ENTRY(counter) \
187 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
188
189#define BNXT_TX_STATS_ENTRY(counter) \
190 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
191
192#define BNXT_RX_STATS_EXT_ENTRY(counter) \
193 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
194
195#define BNXT_TX_STATS_EXT_ENTRY(counter) \
196 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
197
198#define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
199 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
200 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
201
202#define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
203 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
204 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
205
206#define BNXT_RX_STATS_EXT_PFC_ENTRIES \
207 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
208 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
209 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
210 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
211 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
212 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
213 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
214 BNXT_RX_STATS_EXT_PFC_ENTRY(7)
215
216#define BNXT_TX_STATS_EXT_PFC_ENTRIES \
217 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
218 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
219 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
220 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
221 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
222 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
223 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
224 BNXT_TX_STATS_EXT_PFC_ENTRY(7)
225
226#define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
227 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
228 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
229
230#define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
231 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
232 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
233
234#define BNXT_RX_STATS_EXT_COS_ENTRIES \
235 BNXT_RX_STATS_EXT_COS_ENTRY(0), \
236 BNXT_RX_STATS_EXT_COS_ENTRY(1), \
237 BNXT_RX_STATS_EXT_COS_ENTRY(2), \
238 BNXT_RX_STATS_EXT_COS_ENTRY(3), \
239 BNXT_RX_STATS_EXT_COS_ENTRY(4), \
240 BNXT_RX_STATS_EXT_COS_ENTRY(5), \
241 BNXT_RX_STATS_EXT_COS_ENTRY(6), \
242 BNXT_RX_STATS_EXT_COS_ENTRY(7) \
243
244#define BNXT_TX_STATS_EXT_COS_ENTRIES \
245 BNXT_TX_STATS_EXT_COS_ENTRY(0), \
246 BNXT_TX_STATS_EXT_COS_ENTRY(1), \
247 BNXT_TX_STATS_EXT_COS_ENTRY(2), \
248 BNXT_TX_STATS_EXT_COS_ENTRY(3), \
249 BNXT_TX_STATS_EXT_COS_ENTRY(4), \
250 BNXT_TX_STATS_EXT_COS_ENTRY(5), \
251 BNXT_TX_STATS_EXT_COS_ENTRY(6), \
252 BNXT_TX_STATS_EXT_COS_ENTRY(7) \
253
254#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
255 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
256 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
257
258#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
259 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
260 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
261 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
262 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
263 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
264 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
265 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
266 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
267
268#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
269 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
270 __stringify(counter##_pri##n) }
271
272#define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
273 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
274 __stringify(counter##_pri##n) }
275
276#define BNXT_RX_STATS_PRI_ENTRIES(counter) \
277 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
278 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
279 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
280 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
281 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
282 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
283 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
284 BNXT_RX_STATS_PRI_ENTRY(counter, 7)
285
286#define BNXT_TX_STATS_PRI_ENTRIES(counter) \
287 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
288 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
289 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
290 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
291 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
292 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
293 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
294 BNXT_TX_STATS_PRI_ENTRY(counter, 7)
295
296enum {
297 RX_TOTAL_DISCARDS,
298 TX_TOTAL_DISCARDS,
299};
300
301static struct {
302 u64 counter;
303 char string[ETH_GSTRING_LEN];
304} bnxt_sw_func_stats[] = {
305 {0, "rx_total_discard_pkts"},
306 {0, "tx_total_discard_pkts"},
307};
308
309#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
310#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
311#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
312#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
313
314static const struct {
315 long offset;
316 char string[ETH_GSTRING_LEN];
317} bnxt_port_stats_arr[] = {
318 BNXT_RX_STATS_ENTRY(rx_64b_frames),
319 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
320 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
321 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
322 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
323 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
324 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
325 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
326 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
327 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
328 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
329 BNXT_RX_STATS_ENTRY(rx_total_frames),
330 BNXT_RX_STATS_ENTRY(rx_ucast_frames),
331 BNXT_RX_STATS_ENTRY(rx_mcast_frames),
332 BNXT_RX_STATS_ENTRY(rx_bcast_frames),
333 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
334 BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
335 BNXT_RX_STATS_ENTRY(rx_pause_frames),
336 BNXT_RX_STATS_ENTRY(rx_pfc_frames),
337 BNXT_RX_STATS_ENTRY(rx_align_err_frames),
338 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
339 BNXT_RX_STATS_ENTRY(rx_jbr_frames),
340 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
341 BNXT_RX_STATS_ENTRY(rx_tagged_frames),
342 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
343 BNXT_RX_STATS_ENTRY(rx_good_frames),
344 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
345 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
346 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
347 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
348 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
349 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
350 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
351 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
352 BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
353 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
354 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
355 BNXT_RX_STATS_ENTRY(rx_bytes),
356 BNXT_RX_STATS_ENTRY(rx_runt_bytes),
357 BNXT_RX_STATS_ENTRY(rx_runt_frames),
358 BNXT_RX_STATS_ENTRY(rx_stat_discard),
359 BNXT_RX_STATS_ENTRY(rx_stat_err),
360
361 BNXT_TX_STATS_ENTRY(tx_64b_frames),
362 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
363 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
364 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
365 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
366 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
367 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
368 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
369 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
370 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
371 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
372 BNXT_TX_STATS_ENTRY(tx_good_frames),
373 BNXT_TX_STATS_ENTRY(tx_total_frames),
374 BNXT_TX_STATS_ENTRY(tx_ucast_frames),
375 BNXT_TX_STATS_ENTRY(tx_mcast_frames),
376 BNXT_TX_STATS_ENTRY(tx_bcast_frames),
377 BNXT_TX_STATS_ENTRY(tx_pause_frames),
378 BNXT_TX_STATS_ENTRY(tx_pfc_frames),
379 BNXT_TX_STATS_ENTRY(tx_jabber_frames),
380 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
381 BNXT_TX_STATS_ENTRY(tx_err),
382 BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
383 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
384 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
385 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
386 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
387 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
388 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
389 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
390 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
391 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
392 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
393 BNXT_TX_STATS_ENTRY(tx_total_collisions),
394 BNXT_TX_STATS_ENTRY(tx_bytes),
395 BNXT_TX_STATS_ENTRY(tx_xthol_frames),
396 BNXT_TX_STATS_ENTRY(tx_stat_discard),
397 BNXT_TX_STATS_ENTRY(tx_stat_error),
398};
399
400static const struct {
401 long offset;
402 char string[ETH_GSTRING_LEN];
403} bnxt_port_stats_ext_arr[] = {
404 BNXT_RX_STATS_EXT_ENTRY(link_down_events),
405 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
406 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
407 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
408 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
409 BNXT_RX_STATS_EXT_COS_ENTRIES,
410 BNXT_RX_STATS_EXT_PFC_ENTRIES,
411 BNXT_RX_STATS_EXT_ENTRY(rx_bits),
412 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
413 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
414 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
415 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
416};
417
418static const struct {
419 long offset;
420 char string[ETH_GSTRING_LEN];
421} bnxt_tx_port_stats_ext_arr[] = {
422 BNXT_TX_STATS_EXT_COS_ENTRIES,
423 BNXT_TX_STATS_EXT_PFC_ENTRIES,
424};
425
426static const struct {
427 long base_off;
428 char string[ETH_GSTRING_LEN];
429} bnxt_rx_bytes_pri_arr[] = {
430 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
431};
432
433static const struct {
434 long base_off;
435 char string[ETH_GSTRING_LEN];
436} bnxt_rx_pkts_pri_arr[] = {
437 BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
438};
439
440static const struct {
441 long base_off;
442 char string[ETH_GSTRING_LEN];
443} bnxt_tx_bytes_pri_arr[] = {
444 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
445};
446
447static const struct {
448 long base_off;
449 char string[ETH_GSTRING_LEN];
450} bnxt_tx_pkts_pri_arr[] = {
451 BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
452};
453
454#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
455#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
456#define BNXT_NUM_STATS_PRI \
457 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
458 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
459 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
460 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
461
462static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
463{
464 if (BNXT_SUPPORTS_TPA(bp)) {
465 if (bp->max_tpa_v2)
466 return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
467 return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
468 }
469 return 0;
470}
471
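/* Editorial note on the counting below: rx rings carry the hardware rx,
 * TPA and software rx counters, tx rings carry the hardware tx counters,
 * and every completion ring carries the common software counters.  With
 * shared rings all groups are reported once per completion ring.
 */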
472static int bnxt_get_num_ring_stats(struct bnxt *bp)
473{
474 int rx, tx, cmn;
475 bool sh = false;
476
477 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
478 sh = true;
479
480 rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
481 bnxt_get_num_tpa_ring_stats(bp);
482 tx = NUM_RING_TX_HW_STATS;
483 cmn = NUM_RING_CMN_SW_STATS;
484 if (sh)
485 return (rx + tx + cmn) * bp->cp_nr_rings;
486 else
487 return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
488 cmn * bp->cp_nr_rings;
489}
490
491static int bnxt_get_num_stats(struct bnxt *bp)
492{
493 int num_stats = bnxt_get_num_ring_stats(bp);
494
495 num_stats += BNXT_NUM_SW_FUNC_STATS;
496
497 if (bp->flags & BNXT_FLAG_PORT_STATS)
498 num_stats += BNXT_NUM_PORT_STATS;
499
500 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
501 num_stats += bp->fw_rx_stats_ext_size +
502 bp->fw_tx_stats_ext_size;
503 if (bp->pri2cos_valid)
504 num_stats += BNXT_NUM_STATS_PRI;
505 }
506
507 return num_stats;
508}
509
510static int bnxt_get_sset_count(struct net_device *dev, int sset)
511{
512 struct bnxt *bp = netdev_priv(dev);
513
514 switch (sset) {
515 case ETH_SS_STATS:
516 return bnxt_get_num_stats(bp);
517 case ETH_SS_TEST:
518 if (!bp->num_tests)
519 return -EOPNOTSUPP;
520 return bp->num_tests;
521 default:
522 return -EOPNOTSUPP;
523 }
524}
525
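/* Editorial note on the ring index helpers below: with shared rings,
 * index i refers to both the rx and tx ring of completion ring i;
 * otherwise tx ring indices start after the last rx ring.
 */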
526static bool is_rx_ring(struct bnxt *bp, int ring_num)
527{
528 return ring_num < bp->rx_nr_rings;
529}
530
531static bool is_tx_ring(struct bnxt *bp, int ring_num)
532{
533 int tx_base = 0;
534
535 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
536 tx_base = bp->rx_nr_rings;
537
538 if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
539 return true;
540 return false;
541}
542
543static void bnxt_get_ethtool_stats(struct net_device *dev,
544 struct ethtool_stats *stats, u64 *buf)
545{
546 u32 i, j = 0;
547 struct bnxt *bp = netdev_priv(dev);
548 u32 tpa_stats;
549
550 if (!bp->bnapi) {
551 j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
552 goto skip_ring_stats;
553 }
554
555 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
556 bnxt_sw_func_stats[i].counter = 0;
557
558 tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
559 for (i = 0; i < bp->cp_nr_rings; i++) {
560 struct bnxt_napi *bnapi = bp->bnapi[i];
561 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
562 u64 *sw_stats = cpr->stats.sw_stats;
563 u64 *sw;
564 int k;
565
566 if (is_rx_ring(bp, i)) {
567 for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
568 buf[j] = sw_stats[k];
569 }
570 if (is_tx_ring(bp, i)) {
571 k = NUM_RING_RX_HW_STATS;
572 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
573 j++, k++)
574 buf[j] = sw_stats[k];
575 }
576 if (!tpa_stats || !is_rx_ring(bp, i))
577 goto skip_tpa_ring_stats;
578
579 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
580 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
581 tpa_stats; j++, k++)
582 buf[j] = sw_stats[k];
583
584skip_tpa_ring_stats:
585 sw = (u64 *)&cpr->sw_stats.rx;
586 if (is_rx_ring(bp, i)) {
587 for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
588 buf[j] = sw[k];
589 }
590
591 sw = (u64 *)&cpr->sw_stats.cmn;
592 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
593 buf[j] = sw[k];
594
595 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
596 BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
597 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
598 BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
599 }
600
601 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
602 buf[j] = bnxt_sw_func_stats[i].counter;
603
604skip_ring_stats:
605 if (bp->flags & BNXT_FLAG_PORT_STATS) {
606 u64 *port_stats = bp->port_stats.sw_stats;
607
608 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
609 buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
610 }
611 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
612 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
613 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
614
615 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
616 buf[j] = *(rx_port_stats_ext +
617 bnxt_port_stats_ext_arr[i].offset);
618 }
619 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
620 buf[j] = *(tx_port_stats_ext +
621 bnxt_tx_port_stats_ext_arr[i].offset);
622 }
623 if (bp->pri2cos_valid) {
624 for (i = 0; i < 8; i++, j++) {
625 long n = bnxt_rx_bytes_pri_arr[i].base_off +
626 bp->pri2cos_idx[i];
627
628 buf[j] = *(rx_port_stats_ext + n);
629 }
630 for (i = 0; i < 8; i++, j++) {
631 long n = bnxt_rx_pkts_pri_arr[i].base_off +
632 bp->pri2cos_idx[i];
633
634 buf[j] = *(rx_port_stats_ext + n);
635 }
636 for (i = 0; i < 8; i++, j++) {
637 long n = bnxt_tx_bytes_pri_arr[i].base_off +
638 bp->pri2cos_idx[i];
639
640 buf[j] = *(tx_port_stats_ext + n);
641 }
642 for (i = 0; i < 8; i++, j++) {
643 long n = bnxt_tx_pkts_pri_arr[i].base_off +
644 bp->pri2cos_idx[i];
645
646 buf[j] = *(tx_port_stats_ext + n);
647 }
648 }
649 }
650}
651
652static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
653{
654 struct bnxt *bp = netdev_priv(dev);
655 static const char * const *str;
656 u32 i, j, num_str;
657
658 switch (stringset) {
659 case ETH_SS_STATS:
660 for (i = 0; i < bp->cp_nr_rings; i++) {
661 if (is_rx_ring(bp, i)) {
662 num_str = NUM_RING_RX_HW_STATS;
663 for (j = 0; j < num_str; j++) {
664 sprintf(buf, "[%d]: %s", i,
665 bnxt_ring_rx_stats_str[j]);
666 buf += ETH_GSTRING_LEN;
667 }
668 }
669 if (is_tx_ring(bp, i)) {
670 num_str = NUM_RING_TX_HW_STATS;
671 for (j = 0; j < num_str; j++) {
672 sprintf(buf, "[%d]: %s", i,
673 bnxt_ring_tx_stats_str[j]);
674 buf += ETH_GSTRING_LEN;
675 }
676 }
677 num_str = bnxt_get_num_tpa_ring_stats(bp);
678 if (!num_str || !is_rx_ring(bp, i))
679 goto skip_tpa_stats;
680
681 if (bp->max_tpa_v2)
682 str = bnxt_ring_tpa2_stats_str;
683 else
684 str = bnxt_ring_tpa_stats_str;
685
686 for (j = 0; j < num_str; j++) {
687 sprintf(buf, "[%d]: %s", i, str[j]);
688 buf += ETH_GSTRING_LEN;
689 }
690skip_tpa_stats:
691 if (is_rx_ring(bp, i)) {
692 num_str = NUM_RING_RX_SW_STATS;
693 for (j = 0; j < num_str; j++) {
694 sprintf(buf, "[%d]: %s", i,
695 bnxt_rx_sw_stats_str[j]);
696 buf += ETH_GSTRING_LEN;
697 }
698 }
699 num_str = NUM_RING_CMN_SW_STATS;
700 for (j = 0; j < num_str; j++) {
701 sprintf(buf, "[%d]: %s", i,
702 bnxt_cmn_sw_stats_str[j]);
703 buf += ETH_GSTRING_LEN;
704 }
705 }
706 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
707 strcpy(buf, bnxt_sw_func_stats[i].string);
708 buf += ETH_GSTRING_LEN;
709 }
710
711 if (bp->flags & BNXT_FLAG_PORT_STATS) {
712 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
713 strcpy(buf, bnxt_port_stats_arr[i].string);
714 buf += ETH_GSTRING_LEN;
715 }
716 }
717 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
718 for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
719 strcpy(buf, bnxt_port_stats_ext_arr[i].string);
720 buf += ETH_GSTRING_LEN;
721 }
722 for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
723 strcpy(buf,
724 bnxt_tx_port_stats_ext_arr[i].string);
725 buf += ETH_GSTRING_LEN;
726 }
727 if (bp->pri2cos_valid) {
728 for (i = 0; i < 8; i++) {
729 strcpy(buf,
730 bnxt_rx_bytes_pri_arr[i].string);
731 buf += ETH_GSTRING_LEN;
732 }
733 for (i = 0; i < 8; i++) {
734 strcpy(buf,
735 bnxt_rx_pkts_pri_arr[i].string);
736 buf += ETH_GSTRING_LEN;
737 }
738 for (i = 0; i < 8; i++) {
739 strcpy(buf,
740 bnxt_tx_bytes_pri_arr[i].string);
741 buf += ETH_GSTRING_LEN;
742 }
743 for (i = 0; i < 8; i++) {
744 strcpy(buf,
745 bnxt_tx_pkts_pri_arr[i].string);
746 buf += ETH_GSTRING_LEN;
747 }
748 }
749 }
750 break;
751 case ETH_SS_TEST:
752 if (bp->num_tests)
753 memcpy(buf, bp->test_info->string,
754 bp->num_tests * ETH_GSTRING_LEN);
755 break;
756 default:
757 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
758 stringset);
759 break;
760 }
761}
762
763static void bnxt_get_ringparam(struct net_device *dev,
764 struct ethtool_ringparam *ering)
765{
766 struct bnxt *bp = netdev_priv(dev);
767
768 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
769 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
770 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
771
772 ering->rx_pending = bp->rx_ring_size;
773 ering->rx_jumbo_pending = bp->rx_agg_ring_size;
774 ering->tx_pending = bp->tx_ring_size;
775}
776
777static int bnxt_set_ringparam(struct net_device *dev,
778 struct ethtool_ringparam *ering)
779{
780 struct bnxt *bp = netdev_priv(dev);
781
782 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
783 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
784 (ering->tx_pending <= MAX_SKB_FRAGS))
785 return -EINVAL;
786
787 if (netif_running(dev))
788 bnxt_close_nic(bp, false, false);
789
790 bp->rx_ring_size = ering->rx_pending;
791 bp->tx_ring_size = ering->tx_pending;
792 bnxt_set_ring_params(bp);
793
794 if (netif_running(dev))
795 return bnxt_open_nic(bp, false, false);
796
797 return 0;
798}
799
800static void bnxt_get_channels(struct net_device *dev,
801 struct ethtool_channels *channel)
802{
803 struct bnxt *bp = netdev_priv(dev);
804 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
805 int max_rx_rings, max_tx_rings, tcs;
806 int max_tx_sch_inputs;
807
808 /* Get the most up-to-date max_tx_sch_inputs. */
809 if (BNXT_NEW_RM(bp))
810 bnxt_hwrm_func_resc_qcaps(bp, false);
811 max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
812
813 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
814 if (max_tx_sch_inputs)
815 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
816 channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
817
818 if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
819 max_rx_rings = 0;
820 max_tx_rings = 0;
821 }
822 if (max_tx_sch_inputs)
823 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
824
825 tcs = netdev_get_num_tc(dev);
826 if (tcs > 1)
827 max_tx_rings /= tcs;
828
829 channel->max_rx = max_rx_rings;
830 channel->max_tx = max_tx_rings;
831 channel->max_other = 0;
832 if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
833 channel->combined_count = bp->rx_nr_rings;
834 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
835 channel->combined_count--;
836 } else {
837 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
838 channel->rx_count = bp->rx_nr_rings;
839 channel->tx_count = bp->tx_nr_rings_per_tc;
840 }
841 }
842}
843
844static int bnxt_set_channels(struct net_device *dev,
845 struct ethtool_channels *channel)
846{
847 struct bnxt *bp = netdev_priv(dev);
848 int req_tx_rings, req_rx_rings, tcs;
849 bool sh = false;
850 int tx_xdp = 0;
851 int rc = 0;
852
853 if (channel->other_count)
854 return -EINVAL;
855
856 if (!channel->combined_count &&
857 (!channel->rx_count || !channel->tx_count))
858 return -EINVAL;
859
860 if (channel->combined_count &&
861 (channel->rx_count || channel->tx_count))
862 return -EINVAL;
863
864 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
865 channel->tx_count))
866 return -EINVAL;
867
868 if (channel->combined_count)
869 sh = true;
870
871 tcs = netdev_get_num_tc(dev);
872
873 req_tx_rings = sh ? channel->combined_count : channel->tx_count;
874 req_rx_rings = sh ? channel->combined_count : channel->rx_count;
875 if (bp->tx_nr_rings_xdp) {
876 if (!sh) {
877 netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
878 return -EINVAL;
879 }
880 tx_xdp = req_rx_rings;
881 }
882 rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
883 if (rc) {
884 netdev_warn(dev, "Unable to allocate the requested rings\n");
885 return rc;
886 }
887
888 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
889 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
890 (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
891 netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
892 return -EINVAL;
893 }
894
895 if (netif_running(dev)) {
896 if (BNXT_PF(bp)) {
897 /* TODO CHIMP_FW: Send message to all VF's
898 * before PF unload
899 */
900 }
901 rc = bnxt_close_nic(bp, true, false);
902 if (rc) {
903 netdev_err(bp->dev, "Set channel failure rc :%x\n",
904 rc);
905 return rc;
906 }
907 }
908
909 if (sh) {
910 bp->flags |= BNXT_FLAG_SHARED_RINGS;
911 bp->rx_nr_rings = channel->combined_count;
912 bp->tx_nr_rings_per_tc = channel->combined_count;
913 } else {
914 bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
915 bp->rx_nr_rings = channel->rx_count;
916 bp->tx_nr_rings_per_tc = channel->tx_count;
917 }
918 bp->tx_nr_rings_xdp = tx_xdp;
919 bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
920 if (tcs > 1)
921 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
922
923 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
924 bp->tx_nr_rings + bp->rx_nr_rings;
925
926 /* After changing number of rx channels, update NTUPLE feature. */
927 netdev_update_features(dev);
928 if (netif_running(dev)) {
929 rc = bnxt_open_nic(bp, true, false);
930 if ((!rc) && BNXT_PF(bp)) {
931 /* TODO CHIMP_FW: Send message to all VF's
932 * to re-enable
933 */
934 }
935 } else {
936 rc = bnxt_reserve_rings(bp, true);
937 }
938
939 return rc;
940}
941
942#ifdef CONFIG_RFS_ACCEL
943static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
944 u32 *rule_locs)
945{
946 int i, j = 0;
947
948 cmd->data = bp->ntp_fltr_count;
949 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
950 struct hlist_head *head;
951 struct bnxt_ntuple_filter *fltr;
952
953 head = &bp->ntp_fltr_hash_tbl[i];
954 rcu_read_lock();
955 hlist_for_each_entry_rcu(fltr, head, hash) {
956 if (j == cmd->rule_cnt)
957 break;
958 rule_locs[j++] = fltr->sw_id;
959 }
960 rcu_read_unlock();
961 if (j == cmd->rule_cnt)
962 break;
963 }
964 cmd->rule_cnt = j;
965 return 0;
966}
967
968static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
969{
970 struct ethtool_rx_flow_spec *fs =
971 (struct ethtool_rx_flow_spec *)&cmd->fs;
972 struct bnxt_ntuple_filter *fltr;
973 struct flow_keys *fkeys;
974 int i, rc = -EINVAL;
975
976 if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
977 return rc;
978
979 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
980 struct hlist_head *head;
981
982 head = &bp->ntp_fltr_hash_tbl[i];
983 rcu_read_lock();
984 hlist_for_each_entry_rcu(fltr, head, hash) {
985 if (fltr->sw_id == fs->location)
986 goto fltr_found;
987 }
988 rcu_read_unlock();
989 }
990 return rc;
991
992fltr_found:
993 fkeys = &fltr->fkeys;
994 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
995 if (fkeys->basic.ip_proto == IPPROTO_TCP)
996 fs->flow_type = TCP_V4_FLOW;
997 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
998 fs->flow_type = UDP_V4_FLOW;
999 else
1000 goto fltr_err;
1001
1002 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
1003 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
1004
1005 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
1006 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
1007
1008 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
1009 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
1010
1011 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
1012 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
1013 } else {
1014 int i;
1015
1016 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1017 fs->flow_type = TCP_V6_FLOW;
1018 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1019 fs->flow_type = UDP_V6_FLOW;
1020 else
1021 goto fltr_err;
1022
1023 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
1024 fkeys->addrs.v6addrs.src;
1025 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
1026 fkeys->addrs.v6addrs.dst;
1027 for (i = 0; i < 4; i++) {
1028 fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
1029 fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
1030 }
1031 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
1032 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
1033
1034 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
1035 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
1036 }
1037
1038 fs->ring_cookie = fltr->rxq;
1039 rc = 0;
1040
1041fltr_err:
1042 rcu_read_unlock();
1043
1044 return rc;
1045}
1046#endif
1047
1048static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1049{
1050 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1051 return RXH_IP_SRC | RXH_IP_DST;
1052 return 0;
1053}
1054
1055static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1056{
1057 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1058 return RXH_IP_SRC | RXH_IP_DST;
1059 return 0;
1060}
1061
1062static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1063{
1064 cmd->data = 0;
1065 switch (cmd->flow_type) {
1066 case TCP_V4_FLOW:
1067 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1068 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1069 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1070 cmd->data |= get_ethtool_ipv4_rss(bp);
1071 break;
1072 case UDP_V4_FLOW:
1073 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1074 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1075 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1076 fallthrough;
1077 case SCTP_V4_FLOW:
1078 case AH_ESP_V4_FLOW:
1079 case AH_V4_FLOW:
1080 case ESP_V4_FLOW:
1081 case IPV4_FLOW:
1082 cmd->data |= get_ethtool_ipv4_rss(bp);
1083 break;
1084
1085 case TCP_V6_FLOW:
1086 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1087 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1088 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1089 cmd->data |= get_ethtool_ipv6_rss(bp);
1090 break;
1091 case UDP_V6_FLOW:
1092 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1093 cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1094 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1095 fallthrough;
1096 case SCTP_V6_FLOW:
1097 case AH_ESP_V6_FLOW:
1098 case AH_V6_FLOW:
1099 case ESP_V6_FLOW:
1100 case IPV6_FLOW:
1101 cmd->data |= get_ethtool_ipv6_rss(bp);
1102 break;
1103 }
1104 return 0;
1105}
1106
1107#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1108#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1109
1110static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1111{
1112 u32 rss_hash_cfg = bp->rss_hash_cfg;
1113 int tuple, rc = 0;
1114
1115 if (cmd->data == RXH_4TUPLE)
1116 tuple = 4;
1117 else if (cmd->data == RXH_2TUPLE)
1118 tuple = 2;
1119 else if (!cmd->data)
1120 tuple = 0;
1121 else
1122 return -EINVAL;
1123
1124 if (cmd->flow_type == TCP_V4_FLOW) {
1125 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1126 if (tuple == 4)
1127 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1128 } else if (cmd->flow_type == UDP_V4_FLOW) {
1129 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
1130 return -EINVAL;
1131 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1132 if (tuple == 4)
1133 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1134 } else if (cmd->flow_type == TCP_V6_FLOW) {
1135 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1136 if (tuple == 4)
1137 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1138 } else if (cmd->flow_type == UDP_V6_FLOW) {
1139 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
1140 return -EINVAL;
1141 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1142 if (tuple == 4)
1143 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1144 } else if (tuple == 4) {
1145 return -EINVAL;
1146 }
1147
1148 switch (cmd->flow_type) {
1149 case TCP_V4_FLOW:
1150 case UDP_V4_FLOW:
1151 case SCTP_V4_FLOW:
1152 case AH_ESP_V4_FLOW:
1153 case AH_V4_FLOW:
1154 case ESP_V4_FLOW:
1155 case IPV4_FLOW:
1156 if (tuple == 2)
1157 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1158 else if (!tuple)
1159 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1160 break;
1161
1162 case TCP_V6_FLOW:
1163 case UDP_V6_FLOW:
1164 case SCTP_V6_FLOW:
1165 case AH_ESP_V6_FLOW:
1166 case AH_V6_FLOW:
1167 case ESP_V6_FLOW:
1168 case IPV6_FLOW:
1169 if (tuple == 2)
1170 rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1171 else if (!tuple)
1172 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1173 break;
1174 }
1175
1176 if (bp->rss_hash_cfg == rss_hash_cfg)
1177 return 0;
1178
1179 bp->rss_hash_cfg = rss_hash_cfg;
1180 if (netif_running(bp->dev)) {
1181 bnxt_close_nic(bp, false, false);
1182 rc = bnxt_open_nic(bp, false, false);
1183 }
1184 return rc;
1185}
1186
1187static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1188 u32 *rule_locs)
1189{
1190 struct bnxt *bp = netdev_priv(dev);
1191 int rc = 0;
1192
1193 switch (cmd->cmd) {
1194#ifdef CONFIG_RFS_ACCEL
1195 case ETHTOOL_GRXRINGS:
1196 cmd->data = bp->rx_nr_rings;
1197 break;
1198
1199 case ETHTOOL_GRXCLSRLCNT:
1200 cmd->rule_cnt = bp->ntp_fltr_count;
1201 cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1202 break;
1203
1204 case ETHTOOL_GRXCLSRLALL:
1205 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1206 break;
1207
1208 case ETHTOOL_GRXCLSRULE:
1209 rc = bnxt_grxclsrule(bp, cmd);
1210 break;
1211#endif
1212
1213 case ETHTOOL_GRXFH:
1214 rc = bnxt_grxfh(bp, cmd);
1215 break;
1216
1217 default:
1218 rc = -EOPNOTSUPP;
1219 break;
1220 }
1221
1222 return rc;
1223}
1224
1225static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1226{
1227 struct bnxt *bp = netdev_priv(dev);
1228 int rc;
1229
1230 switch (cmd->cmd) {
1231 case ETHTOOL_SRXFH:
1232 rc = bnxt_srxfh(bp, cmd);
1233 break;
1234
1235 default:
1236 rc = -EOPNOTSUPP;
1237 break;
1238 }
1239 return rc;
1240}
1241
1242u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1243{
1244 struct bnxt *bp = netdev_priv(dev);
1245
1246 if (bp->flags & BNXT_FLAG_CHIP_P5)
1247 return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
1248 return HW_HASH_INDEX_SIZE;
1249}
1250
1251static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1252{
1253 return HW_HASH_KEY_SIZE;
1254}
1255
1256static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1257 u8 *hfunc)
1258{
1259 struct bnxt *bp = netdev_priv(dev);
1260 struct bnxt_vnic_info *vnic;
1261 u32 i, tbl_size;
1262
1263 if (hfunc)
1264 *hfunc = ETH_RSS_HASH_TOP;
1265
1266 if (!bp->vnic_info)
1267 return 0;
1268
1269 vnic = &bp->vnic_info[0];
1270 if (indir && bp->rss_indir_tbl) {
1271 tbl_size = bnxt_get_rxfh_indir_size(dev);
1272 for (i = 0; i < tbl_size; i++)
1273 indir[i] = bp->rss_indir_tbl[i];
1274 }
1275
1276 if (key && vnic->rss_hash_key)
1277 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1278
1279 return 0;
1280}
1281
bd3191b5
MC
1282static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
1283 const u8 *key, const u8 hfunc)
1284{
1285 struct bnxt *bp = netdev_priv(dev);
1286 int rc = 0;
1287
1288 if (hfunc && hfunc != ETH_RSS_HASH_TOP)
1289 return -EOPNOTSUPP;
1290
1291 if (key)
1292 return -EOPNOTSUPP;
1293
1294 if (indir) {
1295 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1296
1297 for (i = 0; i < tbl_size; i++)
1298 bp->rss_indir_tbl[i] = indir[i];
1299 pad = bp->rss_indir_tbl_entries - tbl_size;
1300 if (pad)
1301 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1302 }
1303
1304 if (netif_running(bp->dev)) {
1305 bnxt_close_nic(bp, false, false);
1306 rc = bnxt_open_nic(bp, false, false);
1307 }
1308 return rc;
1309}
1310
1311static void bnxt_get_drvinfo(struct net_device *dev,
1312 struct ethtool_drvinfo *info)
1313{
1314 struct bnxt *bp = netdev_priv(dev);
1315
1316 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1317 strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1318 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1319 info->n_stats = bnxt_get_num_stats(bp);
1320 info->testinfo_len = bp->num_tests;
1321 /* TODO CHIMP_FW: eeprom dump details */
1322 info->eedump_len = 0;
1323 /* TODO CHIMP FW: reg dump details */
1324 info->regdump_len = 0;
1325}
1326
1327static int bnxt_get_regs_len(struct net_device *dev)
1328{
1329 struct bnxt *bp = netdev_priv(dev);
1330 int reg_len;
1331
1332 reg_len = BNXT_PXP_REG_LEN;
1333
1334 if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1335 reg_len += sizeof(struct pcie_ctx_hw_stats);
1336
1337 return reg_len;
1338}
1339
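/* Editorial note on the register dump below: it is the PXP register
 * block, optionally followed by the PCIe context stats queried from
 * firmware via HWRM_PCIE_QSTATS when PCIe stats support is advertised.
 */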
1340static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1341 void *_p)
1342{
1343 struct pcie_ctx_hw_stats *hw_pcie_stats;
1344 struct hwrm_pcie_qstats_input req = {0};
1345 struct bnxt *bp = netdev_priv(dev);
1346 dma_addr_t hw_pcie_stats_addr;
1347 int rc;
1348
1349 regs->version = 0;
1350 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
1351
1352 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
1353 return;
1354
1355 hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
1356 sizeof(*hw_pcie_stats),
1357 &hw_pcie_stats_addr, GFP_KERNEL);
1358 if (!hw_pcie_stats)
1359 return;
1360
1361 regs->version = 1;
1362 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
1363 req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
1364 req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
1365 mutex_lock(&bp->hwrm_cmd_lock);
1366 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1367 if (!rc) {
1368 __le64 *src = (__le64 *)hw_pcie_stats;
1369 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
1370 int i;
1371
1372 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
1373 dst[i] = le64_to_cpu(src[i]);
1374 }
1375 mutex_unlock(&bp->hwrm_cmd_lock);
1376 dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
1377 hw_pcie_stats_addr);
1378}
1379
1380static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1381{
1382 struct bnxt *bp = netdev_priv(dev);
1383
1384 wol->supported = 0;
1385 wol->wolopts = 0;
1386 memset(&wol->sopass, 0, sizeof(wol->sopass));
1387 if (bp->flags & BNXT_FLAG_WOL_CAP) {
1388 wol->supported = WAKE_MAGIC;
1389 if (bp->wol)
1390 wol->wolopts = WAKE_MAGIC;
1391 }
1392}
1393
1394static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1395{
1396 struct bnxt *bp = netdev_priv(dev);
1397
1398 if (wol->wolopts & ~WAKE_MAGIC)
1399 return -EINVAL;
1400
1401 if (wol->wolopts & WAKE_MAGIC) {
1402 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1403 return -EINVAL;
1404 if (!bp->wol) {
1405 if (bnxt_hwrm_alloc_wol_fltr(bp))
1406 return -EBUSY;
1407 bp->wol = 1;
1408 }
1409 } else {
1410 if (bp->wol) {
1411 if (bnxt_hwrm_free_wol_fltr(bp))
1412 return -EBUSY;
1413 bp->wol = 0;
1414 }
1415 }
1416 return 0;
1417}
1418
1419u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1420{
1421 u32 speed_mask = 0;
1422
1423 /* TODO: support 25GB, 40GB, 50GB with different cable type */
1424 /* set the advertised speeds */
1425 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1426 speed_mask |= ADVERTISED_100baseT_Full;
1427 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1428 speed_mask |= ADVERTISED_1000baseT_Full;
1429 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1430 speed_mask |= ADVERTISED_2500baseX_Full;
1431 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1432 speed_mask |= ADVERTISED_10000baseT_Full;
1433 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1434 speed_mask |= ADVERTISED_40000baseCR4_Full;
1435
1436 if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1437 speed_mask |= ADVERTISED_Pause;
1438 else if (fw_pause & BNXT_LINK_PAUSE_TX)
1439 speed_mask |= ADVERTISED_Asym_Pause;
1440 else if (fw_pause & BNXT_LINK_PAUSE_RX)
1441 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1442
1443 return speed_mask;
1444}
1445
1446#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
1447{ \
1448 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
1449 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1450 100baseT_Full); \
1451 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
1452 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1453 1000baseT_Full); \
1454 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
1455 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1456 10000baseT_Full); \
1457 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
1458 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1459 25000baseCR_Full); \
1460 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
1461 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1462 40000baseCR4_Full);\
1463 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
1464 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1465 50000baseCR2_Full);\
1466 if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
1467 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1468 100000baseCR4_Full);\
1469 if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
1470 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1471 Pause); \
1472 if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
1473 ethtool_link_ksettings_add_link_mode( \
1474 lk_ksettings, name, Asym_Pause);\
1475 } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
1476 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1477 Asym_Pause); \
1478 } \
1479}
1480
1481#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
1482{ \
1483 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1484 100baseT_Full) || \
1485 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1486 100baseT_Half)) \
1487 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
1488 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1489 1000baseT_Full) || \
1490 ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1491 1000baseT_Half)) \
1492 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
1493 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1494 10000baseT_Full)) \
1495 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
1496 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1497 25000baseCR_Full)) \
1498 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
1499 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1500 40000baseCR4_Full)) \
1501 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
1502 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1503 50000baseCR2_Full)) \
1504 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
1505 if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
1506 100000baseCR4_Full)) \
1507 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
1508}
1509
1510static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1511 struct ethtool_link_ksettings *lk_ksettings)
1512{
1513 u16 fw_speeds = link_info->advertising;
1514 u8 fw_pause = 0;
1515
1516 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1517 fw_pause = link_info->auto_pause_setting;
1518
1519 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1520}
1521
1522static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1523 struct ethtool_link_ksettings *lk_ksettings)
1524{
1525 u16 fw_speeds = link_info->lp_auto_link_speeds;
1526 u8 fw_pause = 0;
1527
1528 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1529 fw_pause = link_info->lp_pause;
1530
1531 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1532 lp_advertising);
1533}
1534
1535static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1536 struct ethtool_link_ksettings *lk_ksettings)
1537{
1538 u16 fw_speeds = link_info->support_speeds;
1539
1540 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1541
1542 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
1543 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1544 Asym_Pause);
1545
1546 if (link_info->support_auto_speeds)
1547 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1548 Autoneg);
1549}
1550
1551u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1552{
1553 switch (fw_link_speed) {
1554 case BNXT_LINK_SPEED_100MB:
1555 return SPEED_100;
1556 case BNXT_LINK_SPEED_1GB:
1557 return SPEED_1000;
1558 case BNXT_LINK_SPEED_2_5GB:
1559 return SPEED_2500;
1560 case BNXT_LINK_SPEED_10GB:
1561 return SPEED_10000;
1562 case BNXT_LINK_SPEED_20GB:
1563 return SPEED_20000;
1564 case BNXT_LINK_SPEED_25GB:
1565 return SPEED_25000;
1566 case BNXT_LINK_SPEED_40GB:
1567 return SPEED_40000;
1568 case BNXT_LINK_SPEED_50GB:
1569 return SPEED_50000;
1570 case BNXT_LINK_SPEED_100GB:
1571 return SPEED_100000;
1572 default:
1573 return SPEED_UNKNOWN;
1574 }
1575}
1576
1577static int bnxt_get_link_ksettings(struct net_device *dev,
1578 struct ethtool_link_ksettings *lk_ksettings)
1579{
1580 struct bnxt *bp = netdev_priv(dev);
1581 struct bnxt_link_info *link_info = &bp->link_info;
1582 struct ethtool_link_settings *base = &lk_ksettings->base;
1583 u32 ethtool_speed;
1584
1585 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1586 mutex_lock(&bp->link_lock);
1587 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1588
1589 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
1590 if (link_info->autoneg) {
1591 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
1592 ethtool_link_ksettings_add_link_mode(lk_ksettings,
1593 advertising, Autoneg);
1594 base->autoneg = AUTONEG_ENABLE;
1595 base->duplex = DUPLEX_UNKNOWN;
1596 if (link_info->phy_link_status == BNXT_LINK_LINK) {
1597 bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
1598 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1599 base->duplex = DUPLEX_FULL;
1600 else
1601 base->duplex = DUPLEX_HALF;
1602 }
1603 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
1604 } else {
1605 base->autoneg = AUTONEG_DISABLE;
1606 ethtool_speed =
1607 bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
1608 base->duplex = DUPLEX_HALF;
1609 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
1610 base->duplex = DUPLEX_FULL;
1611 }
1612 base->speed = ethtool_speed;
1613
1614 base->port = PORT_NONE;
1615 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1616 base->port = PORT_TP;
1617 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1618 TP);
1619 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1620 TP);
1621 } else {
1622 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1623 FIBRE);
1624 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1625 FIBRE);
1626
1627 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
1628 base->port = PORT_DA;
1629 else if (link_info->media_type ==
1630 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
1631 base->port = PORT_FIBRE;
1632 }
1633 base->phy_address = link_info->phy_addr;
1634 mutex_unlock(&bp->link_lock);
1635
1636 return 0;
1637}
1638
1639static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
1640{
1641 struct bnxt *bp = netdev_priv(dev);
1642 struct bnxt_link_info *link_info = &bp->link_info;
1643 u16 support_spds = link_info->support_speeds;
1644 u32 fw_speed = 0;
1645
1646 switch (ethtool_speed) {
1647 case SPEED_100:
1648 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1649 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
1650 break;
1651 case SPEED_1000:
1652 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1653 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
1654 break;
1655 case SPEED_2500:
1656 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1657 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
1658 break;
1659 case SPEED_10000:
1660 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1661 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
1662 break;
1663 case SPEED_20000:
1664 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1665 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
1666 break;
1667 case SPEED_25000:
1668 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1669 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
1670 break;
1671 case SPEED_40000:
1672 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1673 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
1674 break;
1675 case SPEED_50000:
1676 if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
1677 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
1678 break;
1679 case SPEED_100000:
1680 if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
1681 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
1682 break;
1683 default:
1684 netdev_err(dev, "unsupported speed!\n");
1685 break;
1686 }
1687 return fw_speed;
1688}
1689
1690u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1691{
1692 u16 fw_speed_mask = 0;
1693
1694 /* only support autoneg at speed 100, 1000, and 10000 */
1695 if (advertising & (ADVERTISED_100baseT_Full |
1696 ADVERTISED_100baseT_Half)) {
1697 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1698 }
1699 if (advertising & (ADVERTISED_1000baseT_Full |
1700 ADVERTISED_1000baseT_Half)) {
1701 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1702 }
1703 if (advertising & ADVERTISED_10000baseT_Full)
1704 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1705
1706 if (advertising & ADVERTISED_40000baseCR4_Full)
1707 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1708
1709 return fw_speed_mask;
1710}
1711
1712static int bnxt_set_link_ksettings(struct net_device *dev,
1713 const struct ethtool_link_ksettings *lk_ksettings)
1714{
1715 struct bnxt *bp = netdev_priv(dev);
1716 struct bnxt_link_info *link_info = &bp->link_info;
1717 const struct ethtool_link_settings *base = &lk_ksettings->base;
1718 bool set_pause = false;
1719 u16 fw_advertising = 0;
1720 u32 speed;
1721 int rc = 0;
1722
1723 if (!BNXT_PHY_CFG_ABLE(bp))
1724 return -EOPNOTSUPP;
1725
1726 mutex_lock(&bp->link_lock);
1727 if (base->autoneg == AUTONEG_ENABLE) {
1728 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1729 advertising);
c0c050c5
MC
1730 link_info->autoneg |= BNXT_AUTONEG_SPEED;
1731 if (!fw_advertising)
93ed8117 1732 link_info->advertising = link_info->support_auto_speeds;
c0c050c5
MC
1733 else
1734 link_info->advertising = fw_advertising;
 1735 /* Any change to autoneg causes a link change, so the driver
 1736 * must restore the original pause setting as part of autoneg.
 1737 */
1738 set_pause = true;
1739 } else {
9d9cee08 1740 u16 fw_speed;
03efbec0 1741 u8 phy_type = link_info->phy_type;
9d9cee08 1742
03efbec0
MC
1743 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
1744 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
1745 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1746 netdev_err(dev, "10GBase-T devices must autoneg\n");
1747 rc = -EINVAL;
1748 goto set_setting_exit;
1749 }
00c04a92 1750 if (base->duplex == DUPLEX_HALF) {
c0c050c5
MC
1751 netdev_err(dev, "HALF DUPLEX is not supported!\n");
1752 rc = -EINVAL;
1753 goto set_setting_exit;
1754 }
00c04a92 1755 speed = base->speed;
9d9cee08
MC
1756 fw_speed = bnxt_get_fw_speed(dev, speed);
1757 if (!fw_speed) {
1758 rc = -EINVAL;
1759 goto set_setting_exit;
1760 }
1761 link_info->req_link_speed = fw_speed;
c0c050c5 1762 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
b763499e 1763 link_info->autoneg = 0;
c0c050c5
MC
1764 link_info->advertising = 0;
1765 }
1766
1767 if (netif_running(dev))
939f7f0c 1768 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
c0c050c5
MC
1769
1770set_setting_exit:
e2dc9b6e 1771 mutex_unlock(&bp->link_lock);
c0c050c5
MC
1772 return rc;
1773}
1774
1775static void bnxt_get_pauseparam(struct net_device *dev,
1776 struct ethtool_pauseparam *epause)
1777{
1778 struct bnxt *bp = netdev_priv(dev);
1779 struct bnxt_link_info *link_info = &bp->link_info;
1780
1781 if (BNXT_VF(bp))
1782 return;
b763499e 1783 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
3c02d1bb
MC
1784 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
1785 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
c0c050c5
MC
1786}
1787
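/* Configure flow control.  Pause autoneg requires link autoneg to be
 * enabled first; switching from autoneg pause to forced pause marks
 * the link for a forced change so the new setting takes effect.
 */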
1788static int bnxt_set_pauseparam(struct net_device *dev,
1789 struct ethtool_pauseparam *epause)
1790{
1791 int rc = 0;
1792 struct bnxt *bp = netdev_priv(dev);
1793 struct bnxt_link_info *link_info = &bp->link_info;
1794
c7e457f4 1795 if (!BNXT_PHY_CFG_ABLE(bp))
75362a3f 1796 return -EOPNOTSUPP;
c0c050c5
MC
1797
1798 if (epause->autoneg) {
b763499e
MC
1799 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
1800 return -EINVAL;
1801
c0c050c5 1802 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
c9ee9516
MC
1803 if (bp->hwrm_spec_code >= 0x10201)
1804 link_info->req_flow_ctrl =
1805 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
1806 } else {
 1807 /* When transitioning from autoneg pause to forced pause,
 1808 * force a link change.
 1809 */
1810 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1811 link_info->force_link_chng = true;
1812 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
c9ee9516 1813 link_info->req_flow_ctrl = 0;
c0c050c5
MC
1814 }
1815 if (epause->rx_pause)
1816 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
c0c050c5
MC
1817
1818 if (epause->tx_pause)
1819 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
c0c050c5 1820
163e9ef6
VV
1821 if (netif_running(dev)) {
1822 mutex_lock(&bp->link_lock);
c0c050c5 1823 rc = bnxt_hwrm_set_pause(bp);
163e9ef6
VV
1824 mutex_unlock(&bp->link_lock);
1825 }
c0c050c5
MC
1826 return rc;
1827}
1828
1829static u32 bnxt_get_link(struct net_device *dev)
1830{
1831 struct bnxt *bp = netdev_priv(dev);
1832
1833 /* TODO: handle MF, VF, driver close case */
1834 return bp->link_info.link_up;
1835}
1836
b3b0ddd0
MC
1837static void bnxt_print_admin_err(struct bnxt *bp)
1838{
1839 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
1840}
1841
5ac67d8b
RS
1842static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1843 u16 ext, u16 *index, u32 *item_length,
1844 u32 *data_length);
1845
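/* Write an image into an NVM directory entry: the data is copied into
 * a DMA-coherent bounce buffer and sent with HWRM_NVM_WRITE using the
 * extended FLASH_NVRAM_TIMEOUT.  -EACCES means the PF lacks admin
 * privilege to flash the device.
 */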
c0c050c5
MC
1846static int bnxt_flash_nvram(struct net_device *dev,
1847 u16 dir_type,
1848 u16 dir_ordinal,
1849 u16 dir_ext,
1850 u16 dir_attr,
1851 const u8 *data,
1852 size_t data_len)
1853{
1854 struct bnxt *bp = netdev_priv(dev);
1855 int rc;
1856 struct hwrm_nvm_write_input req = {0};
1857 dma_addr_t dma_handle;
1858 u8 *kmem;
1859
1860 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
1861
1862 req.dir_type = cpu_to_le16(dir_type);
1863 req.dir_ordinal = cpu_to_le16(dir_ordinal);
1864 req.dir_ext = cpu_to_le16(dir_ext);
1865 req.dir_attr = cpu_to_le16(dir_attr);
1866 req.dir_data_length = cpu_to_le32(data_len);
1867
1868 kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
1869 GFP_KERNEL);
1870 if (!kmem) {
1871 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1872 (unsigned)data_len);
1873 return -ENOMEM;
1874 }
1875 memcpy(kmem, data, data_len);
1876 req.host_src_addr = cpu_to_le64(dma_handle);
1877
1878 rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
1879 dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
1880
d4f1420d 1881 if (rc == -EACCES)
b3b0ddd0 1882 bnxt_print_admin_err(bp);
c0c050c5
MC
1883 return rc;
1884}
1885
95fec034
EP
1886static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
1887 u8 self_reset, u8 flags)
d2d6318c 1888{
d2d6318c 1889 struct hwrm_fw_reset_input req = {0};
7c675421
VV
1890 struct bnxt *bp = netdev_priv(dev);
1891 int rc;
d2d6318c
RS
1892
1893 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
1894
95fec034
EP
1895 req.embedded_proc_type = proc_type;
1896 req.selfrst_status = self_reset;
1897 req.flags = flags;
1898
8cec0940
EP
1899 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
1900 rc = hwrm_send_message_silent(bp, &req, sizeof(req),
1901 HWRM_CMD_TIMEOUT);
1902 } else {
1903 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1904 if (rc == -EACCES)
1905 bnxt_print_admin_err(bp);
1906 }
95fec034
EP
1907 return rc;
1908}
1909
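/* Select which embedded processor to reset based on the NVM directory
 * type that was just flashed: boot code resets ChiMP, APE images reset
 * the management processor, KONG the network control processor and
 * BONO the RoCE processor.  ChiMP and APE self-reset on the next PCIe
 * reset rather than immediately.
 */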
94f17e89
EP
1910static int bnxt_firmware_reset(struct net_device *dev,
1911 enum bnxt_nvm_directory_type dir_type)
95fec034
EP
1912{
1913 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
95fec034
EP
1914 u8 proc_type, flags = 0;
1915
d2d6318c
RS
1916 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
1917 /* (e.g. when firmware isn't already running) */
1918 switch (dir_type) {
1919 case BNX_DIR_TYPE_CHIMP_PATCH:
1920 case BNX_DIR_TYPE_BOOTCODE:
1921 case BNX_DIR_TYPE_BOOTCODE_2:
95fec034 1922 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
d2d6318c 1923 /* Self-reset ChiMP upon next PCIe reset: */
95fec034 1924 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
d2d6318c
RS
1925 break;
1926 case BNX_DIR_TYPE_APE_FW:
1927 case BNX_DIR_TYPE_APE_PATCH:
95fec034 1928 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
08141e0b 1929 /* Self-reset APE upon next PCIe reset: */
95fec034 1930 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
d2d6318c
RS
1931 break;
1932 case BNX_DIR_TYPE_KONG_FW:
1933 case BNX_DIR_TYPE_KONG_PATCH:
95fec034 1934 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
d2d6318c
RS
1935 break;
1936 case BNX_DIR_TYPE_BONO_FW:
1937 case BNX_DIR_TYPE_BONO_PATCH:
95fec034 1938 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
d2d6318c
RS
1939 break;
1940 default:
1941 return -EINVAL;
1942 }
1943
95fec034 1944 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
d2d6318c
RS
1945}
1946
94f17e89
EP
1947static int bnxt_firmware_reset_chip(struct net_device *dev)
1948{
1949 struct bnxt *bp = netdev_priv(dev);
1950 u8 flags = 0;
1951
1952 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
1953 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
1954
1955 return bnxt_hwrm_firmware_reset(dev,
1956 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
1957 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
1958 flags);
1959}
1960
1961static int bnxt_firmware_reset_ap(struct net_device *dev)
1962{
1963 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
1964 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
1965 0);
1966}
1967
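/* Flash a firmware image that carries a bnxt_fw_header.  The directory
 * type is mapped to an expected code type, then the header signature,
 * code type, device family and trailing CRC32 are validated before the
 * image is written and the owning processor is reset.
 */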
c0c050c5
MC
1968static int bnxt_flash_firmware(struct net_device *dev,
1969 u16 dir_type,
1970 const u8 *fw_data,
1971 size_t fw_size)
1972{
1973 int rc = 0;
1974 u16 code_type;
1975 u32 stored_crc;
1976 u32 calculated_crc;
1977 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
1978
1979 switch (dir_type) {
1980 case BNX_DIR_TYPE_BOOTCODE:
1981 case BNX_DIR_TYPE_BOOTCODE_2:
1982 code_type = CODE_BOOT;
1983 break;
93e0b4fe
RS
1984 case BNX_DIR_TYPE_CHIMP_PATCH:
1985 code_type = CODE_CHIMP_PATCH;
1986 break;
2731d70f
RS
1987 case BNX_DIR_TYPE_APE_FW:
1988 code_type = CODE_MCTP_PASSTHRU;
1989 break;
93e0b4fe
RS
1990 case BNX_DIR_TYPE_APE_PATCH:
1991 code_type = CODE_APE_PATCH;
1992 break;
1993 case BNX_DIR_TYPE_KONG_FW:
1994 code_type = CODE_KONG_FW;
1995 break;
1996 case BNX_DIR_TYPE_KONG_PATCH:
1997 code_type = CODE_KONG_PATCH;
1998 break;
1999 case BNX_DIR_TYPE_BONO_FW:
2000 code_type = CODE_BONO_FW;
2001 break;
2002 case BNX_DIR_TYPE_BONO_PATCH:
2003 code_type = CODE_BONO_PATCH;
2004 break;
c0c050c5
MC
2005 default:
2006 netdev_err(dev, "Unsupported directory entry type: %u\n",
2007 dir_type);
2008 return -EINVAL;
2009 }
2010 if (fw_size < sizeof(struct bnxt_fw_header)) {
2011 netdev_err(dev, "Invalid firmware file size: %u\n",
2012 (unsigned int)fw_size);
2013 return -EINVAL;
2014 }
2015 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
2016 netdev_err(dev, "Invalid firmware signature: %08X\n",
2017 le32_to_cpu(header->signature));
2018 return -EINVAL;
2019 }
2020 if (header->code_type != code_type) {
2021 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
2022 code_type, header->code_type);
2023 return -EINVAL;
2024 }
2025 if (header->device != DEVICE_CUMULUS_FAMILY) {
2026 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
2027 DEVICE_CUMULUS_FAMILY, header->device);
2028 return -EINVAL;
2029 }
2030 /* Confirm the CRC32 checksum of the file: */
2031 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2032 sizeof(stored_crc)));
2033 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2034 if (calculated_crc != stored_crc) {
2035 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
2036 (unsigned long)stored_crc,
2037 (unsigned long)calculated_crc);
2038 return -EINVAL;
2039 }
c0c050c5
MC
2040 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2041 0, 0, fw_data, fw_size);
d2d6318c
RS
2042 if (rc == 0) /* Firmware update successful */
2043 rc = bnxt_firmware_reset(dev, dir_type);
2044
c0c050c5
MC
2045 return rc;
2046}
2047
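/* Flash a microcode image that is validated through a trailing
 * bnxt_ucode_trailer (signature, directory type, trailer length and
 * CRC32) instead of a leading firmware header; no processor reset
 * follows the write.
 */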
5ac67d8b
RS
2048static int bnxt_flash_microcode(struct net_device *dev,
2049 u16 dir_type,
2050 const u8 *fw_data,
2051 size_t fw_size)
2052{
2053 struct bnxt_ucode_trailer *trailer;
2054 u32 calculated_crc;
2055 u32 stored_crc;
2056 int rc = 0;
2057
2058 if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
2059 netdev_err(dev, "Invalid microcode file size: %u\n",
2060 (unsigned int)fw_size);
2061 return -EINVAL;
2062 }
2063 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
2064 sizeof(*trailer)));
2065 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
2066 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
2067 le32_to_cpu(trailer->sig));
2068 return -EINVAL;
2069 }
2070 if (le16_to_cpu(trailer->dir_type) != dir_type) {
2071 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
2072 dir_type, le16_to_cpu(trailer->dir_type));
2073 return -EINVAL;
2074 }
2075 if (le16_to_cpu(trailer->trailer_length) <
2076 sizeof(struct bnxt_ucode_trailer)) {
2077 netdev_err(dev, "Invalid microcode trailer length: %d\n",
2078 le16_to_cpu(trailer->trailer_length));
2079 return -EINVAL;
2080 }
2081
2082 /* Confirm the CRC32 checksum of the file: */
2083 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2084 sizeof(stored_crc)));
2085 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2086 if (calculated_crc != stored_crc) {
2087 netdev_err(dev,
2088 "CRC32 (%08lX) does not match calculated: %08lX\n",
2089 (unsigned long)stored_crc,
2090 (unsigned long)calculated_crc);
2091 return -EINVAL;
2092 }
2093 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2094 0, 0, fw_data, fw_size);
2095
2096 return rc;
2097}
2098
c0c050c5
MC
2099static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2100{
2101 switch (dir_type) {
2102 case BNX_DIR_TYPE_CHIMP_PATCH:
2103 case BNX_DIR_TYPE_BOOTCODE:
2104 case BNX_DIR_TYPE_BOOTCODE_2:
2105 case BNX_DIR_TYPE_APE_FW:
2106 case BNX_DIR_TYPE_APE_PATCH:
2107 case BNX_DIR_TYPE_KONG_FW:
2108 case BNX_DIR_TYPE_KONG_PATCH:
93e0b4fe
RS
2109 case BNX_DIR_TYPE_BONO_FW:
2110 case BNX_DIR_TYPE_BONO_PATCH:
c0c050c5
MC
2111 return true;
2112 }
2113
2114 return false;
2115}
2116
5ac67d8b 2117static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
c0c050c5
MC
2118{
2119 switch (dir_type) {
2120 case BNX_DIR_TYPE_AVS:
2121 case BNX_DIR_TYPE_EXP_ROM_MBA:
2122 case BNX_DIR_TYPE_PCIE:
2123 case BNX_DIR_TYPE_TSCF_UCODE:
2124 case BNX_DIR_TYPE_EXT_PHY:
2125 case BNX_DIR_TYPE_CCM:
2126 case BNX_DIR_TYPE_ISCSI_BOOT:
2127 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2128 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2129 return true;
2130 }
2131
2132 return false;
2133}
2134
2135static bool bnxt_dir_type_is_executable(u16 dir_type)
2136{
2137 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
5ac67d8b 2138 bnxt_dir_type_is_other_exec_format(dir_type);
c0c050c5
MC
2139}
2140
2141static int bnxt_flash_firmware_from_file(struct net_device *dev,
2142 u16 dir_type,
2143 const char *filename)
2144{
2145 const struct firmware *fw;
2146 int rc;
2147
c0c050c5
MC
2148 rc = request_firmware(&fw, filename, &dev->dev);
2149 if (rc != 0) {
2150 netdev_err(dev, "Error %d requesting firmware file: %s\n",
2151 rc, filename);
2152 return rc;
2153 }
ba425800 2154 if (bnxt_dir_type_is_ape_bin_format(dir_type))
c0c050c5 2155 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
ba425800 2156 else if (bnxt_dir_type_is_other_exec_format(dir_type))
5ac67d8b 2157 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
c0c050c5
MC
2158 else
2159 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2160 0, 0, fw->data, fw->size);
2161 release_firmware(fw);
2162 return rc;
2163}
2164
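/* Flash a complete NVM package: locate the UPDATE staging area in
 * NVRAM, copy the file into it with HWRM_NVM_MODIFY, then run
 * HWRM_NVM_INSTALL_UPDATE.  A fragmentation error is retried once with
 * defragmentation allowed; a non-zero install result is reported as
 * -ENOPKG.
 */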
d168f328
VV
2165int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
2166 u32 install_type)
c0c050c5 2167{
5ac67d8b
RS
2168 struct bnxt *bp = netdev_priv(dev);
2169 struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
2170 struct hwrm_nvm_install_update_input install = {0};
2171 const struct firmware *fw;
2172 u32 item_len;
22630e28 2173 int rc = 0;
5ac67d8b 2174 u16 index;
5ac67d8b
RS
2175
2176 bnxt_hwrm_fw_set_time(bp);
2177
95ec1f47
VV
2178 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
2179 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2180 &index, &item_len, NULL);
2181 if (rc) {
5ac67d8b 2182 netdev_err(dev, "PKG update area not created in nvram\n");
95ec1f47 2183 return rc;
5ac67d8b
RS
2184 }
2185
2186 rc = request_firmware(&fw, filename, &dev->dev);
2187 if (rc != 0) {
2188 netdev_err(dev, "PKG error %d requesting file: %s\n",
2189 rc, filename);
2190 return rc;
2191 }
2192
2193 if (fw->size > item_len) {
9a005c38 2194 netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
5ac67d8b
RS
2195 (unsigned long)fw->size);
2196 rc = -EFBIG;
2197 } else {
2198 dma_addr_t dma_handle;
2199 u8 *kmem;
2200 struct hwrm_nvm_modify_input modify = {0};
2201
2202 bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
2203
2204 modify.dir_idx = cpu_to_le16(index);
2205 modify.len = cpu_to_le32(fw->size);
2206
2207 kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
2208 &dma_handle, GFP_KERNEL);
2209 if (!kmem) {
2210 netdev_err(dev,
2211 "dma_alloc_coherent failure, length = %u\n",
2212 (unsigned int)fw->size);
2213 rc = -ENOMEM;
2214 } else {
2215 memcpy(kmem, fw->data, fw->size);
2216 modify.host_src_addr = cpu_to_le64(dma_handle);
2217
22630e28
EP
2218 rc = hwrm_send_message(bp, &modify, sizeof(modify),
2219 FLASH_PACKAGE_TIMEOUT);
5ac67d8b
RS
2220 dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
2221 dma_handle);
2222 }
2223 }
2224 release_firmware(fw);
22630e28 2225 if (rc)
7c675421 2226 goto err_exit;
5ac67d8b
RS
2227
2228 if ((install_type & 0xffff) == 0)
2229 install_type >>= 16;
2230 bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
2231 install.install_type = cpu_to_le32(install_type);
2232
cb4d1d62 2233 mutex_lock(&bp->hwrm_cmd_lock);
22630e28
EP
2234 rc = _hwrm_send_message(bp, &install, sizeof(install),
2235 INSTALL_PACKAGE_TIMEOUT);
2236 if (rc) {
cb4d1d62
KS
2237 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
2238
dd2ebf34
VV
2239 if (resp->error_code && error_code ==
2240 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
cb4d1d62
KS
2241 install.flags |= cpu_to_le16(
2242 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
22630e28
EP
2243 rc = _hwrm_send_message(bp, &install, sizeof(install),
2244 INSTALL_PACKAGE_TIMEOUT);
cb4d1d62 2245 }
22630e28 2246 if (rc)
dd2ebf34 2247 goto flash_pkg_exit;
cb4d1d62 2248 }
5ac67d8b
RS
2249
2250 if (resp->result) {
2251 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
2252 (s8)resp->result, (int)resp->problem_item);
cb4d1d62 2253 rc = -ENOPKG;
5ac67d8b 2254 }
cb4d1d62
KS
2255flash_pkg_exit:
2256 mutex_unlock(&bp->hwrm_cmd_lock);
7c675421 2257err_exit:
22630e28 2258 if (rc == -EACCES)
b3b0ddd0 2259 bnxt_print_admin_err(bp);
cb4d1d62 2260 return rc;
c0c050c5
MC
2261}
2262
2263static int bnxt_flash_device(struct net_device *dev,
2264 struct ethtool_flash *flash)
2265{
2266 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2267 netdev_err(dev, "flashdev not supported from a virtual function\n");
2268 return -EINVAL;
2269 }
2270
5ac67d8b
RS
2271 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2272 flash->region > 0xffff)
2273 return bnxt_flash_package_from_file(dev, flash->data,
2274 flash->region);
c0c050c5
MC
2275
2276 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
2277}
2278
2279static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
2280{
2281 struct bnxt *bp = netdev_priv(dev);
2282 int rc;
2283 struct hwrm_nvm_get_dir_info_input req = {0};
2284 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
2285
2286 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
2287
2288 mutex_lock(&bp->hwrm_cmd_lock);
2289 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2290 if (!rc) {
2291 *entries = le32_to_cpu(output->entries);
2292 *length = le32_to_cpu(output->entry_length);
2293 }
2294 mutex_unlock(&bp->hwrm_cmd_lock);
2295 return rc;
2296}
2297
2298static int bnxt_get_eeprom_len(struct net_device *dev)
2299{
4cebbaca
MC
2300 struct bnxt *bp = netdev_priv(dev);
2301
2302 if (BNXT_VF(bp))
2303 return 0;
2304
c0c050c5
MC
2305 /* The -1 return value allows the entire 32-bit range of offsets to be
2306 * passed via the ethtool command-line utility.
2307 */
2308 return -1;
2309}
2310
2311static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
2312{
2313 struct bnxt *bp = netdev_priv(dev);
2314 int rc;
2315 u32 dir_entries;
2316 u32 entry_length;
2317 u8 *buf;
2318 size_t buflen;
2319 dma_addr_t dma_handle;
2320 struct hwrm_nvm_get_dir_entries_input req = {0};
2321
2322 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
2323 if (rc != 0)
2324 return rc;
2325
2326 /* Insert 2 bytes of directory info (count and size of entries) */
2327 if (len < 2)
2328 return -EINVAL;
2329
2330 *data++ = dir_entries;
2331 *data++ = entry_length;
2332 len -= 2;
2333 memset(data, 0xff, len);
2334
2335 buflen = dir_entries * entry_length;
2336 buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
2337 GFP_KERNEL);
2338 if (!buf) {
2339 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
2340 (unsigned)buflen);
2341 return -ENOMEM;
2342 }
2343 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
2344 req.host_dest_addr = cpu_to_le64(dma_handle);
2345 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2346 if (rc == 0)
2347 memcpy(data, buf, len > buflen ? buflen : len);
2348 dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
2349 return rc;
2350}
2351
2352static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
2353 u32 length, u8 *data)
2354{
2355 struct bnxt *bp = netdev_priv(dev);
2356 int rc;
2357 u8 *buf;
2358 dma_addr_t dma_handle;
2359 struct hwrm_nvm_read_input req = {0};
2360
e0ad8fc5
MC
2361 if (!length)
2362 return -EINVAL;
2363
c0c050c5
MC
2364 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
2365 GFP_KERNEL);
2366 if (!buf) {
2367 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
2368 (unsigned)length);
2369 return -ENOMEM;
2370 }
2371 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
2372 req.host_dest_addr = cpu_to_le64(dma_handle);
2373 req.dir_idx = cpu_to_le16(index);
2374 req.offset = cpu_to_le32(offset);
2375 req.len = cpu_to_le32(length);
2376
2377 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2378 if (rc == 0)
2379 memcpy(data, buf, length);
2380 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
2381 return rc;
2382}
2383
3ebf6f0a
RS
2384static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2385 u16 ext, u16 *index, u32 *item_length,
2386 u32 *data_length)
2387{
2388 struct bnxt *bp = netdev_priv(dev);
2389 int rc;
2390 struct hwrm_nvm_find_dir_entry_input req = {0};
2391 struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
2392
2393 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
2394 req.enables = 0;
2395 req.dir_idx = 0;
2396 req.dir_type = cpu_to_le16(type);
2397 req.dir_ordinal = cpu_to_le16(ordinal);
2398 req.dir_ext = cpu_to_le16(ext);
2399 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
cc72f3b1
MC
2400 mutex_lock(&bp->hwrm_cmd_lock);
2401 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3ebf6f0a
RS
2402 if (rc == 0) {
2403 if (index)
2404 *index = le16_to_cpu(output->dir_idx);
2405 if (item_length)
2406 *item_length = le32_to_cpu(output->dir_item_length);
2407 if (data_length)
2408 *data_length = le32_to_cpu(output->dir_data_length);
2409 }
cc72f3b1 2410 mutex_unlock(&bp->hwrm_cmd_lock);
3ebf6f0a
RS
2411 return rc;
2412}
2413
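/* Return a pointer to the requested tab-separated field on the last
 * line of the package log.  The buffer is modified in place: tabs and
 * the trailing newline are replaced with NUL terminators.
 */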
2414static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
2415{
2416 char *retval = NULL;
2417 char *p;
2418 char *value;
2419 int field = 0;
2420
2421 if (datalen < 1)
2422 return NULL;
2423 /* null-terminate the log data (removing last '\n'): */
2424 data[datalen - 1] = 0;
2425 for (p = data; *p != 0; p++) {
2426 field = 0;
2427 retval = NULL;
2428 while (*p != 0 && *p != '\n') {
2429 value = p;
2430 while (*p != 0 && *p != '\t' && *p != '\n')
2431 p++;
2432 if (field == desired_field)
2433 retval = value;
2434 if (*p != '\t')
2435 break;
2436 *p = 0;
2437 field++;
2438 p++;
2439 }
2440 if (*p == 0)
2441 break;
2442 *p = 0;
2443 }
2444 return retval;
2445}
2446
a60faa60 2447static void bnxt_get_pkgver(struct net_device *dev)
3ebf6f0a 2448{
a60faa60 2449 struct bnxt *bp = netdev_priv(dev);
3ebf6f0a 2450 u16 index = 0;
a60faa60
VV
2451 char *pkgver;
2452 u32 pkglen;
2453 u8 *pkgbuf;
2454 int len;
3ebf6f0a
RS
2455
2456 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2457 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
a60faa60
VV
2458 &index, NULL, &pkglen) != 0)
2459 return;
3ebf6f0a 2460
a60faa60
VV
2461 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2462 if (!pkgbuf) {
2463 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
2464 pkglen);
2465 return;
2466 }
2467
2468 if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
2469 goto err;
3ebf6f0a 2470
a60faa60
VV
2471 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
2472 pkglen);
2473 if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
2474 len = strlen(bp->fw_ver_str);
2475 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
2476 "/pkg %s", pkgver);
2477 }
2478err:
2479 kfree(pkgbuf);
3ebf6f0a
RS
2480}
2481
c0c050c5
MC
2482static int bnxt_get_eeprom(struct net_device *dev,
2483 struct ethtool_eeprom *eeprom,
2484 u8 *data)
2485{
2486 u32 index;
2487 u32 offset;
2488
2489 if (eeprom->offset == 0) /* special offset value to get directory */
2490 return bnxt_get_nvram_directory(dev, eeprom->len, data);
2491
2492 index = eeprom->offset >> 24;
2493 offset = eeprom->offset & 0xffffff;
2494
2495 if (index == 0) {
2496 netdev_err(dev, "unsupported index value: %d\n", index);
2497 return -EINVAL;
2498 }
2499
2500 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
2501}
2502
2503static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
2504{
2505 struct bnxt *bp = netdev_priv(dev);
2506 struct hwrm_nvm_erase_dir_entry_input req = {0};
2507
2508 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
2509 req.dir_idx = cpu_to_le16(index);
2510 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2511}
2512
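/* NVM write through the ethtool EEPROM interface (PF only).
 * magic[31:16] selects the directory type; the special value 0xffff
 * requests a directory operation with the index in magic[7:0] and the
 * op code (0x0e = erase) in magic[15:8], and offset must equal ~magic.
 * For a normal write, magic[15:0] is the extension, offset[31:16] the
 * ordinal and offset[15:0] the attributes.  Executable item types are
 * rejected with -EOPNOTSUPP.
 */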
2513static int bnxt_set_eeprom(struct net_device *dev,
2514 struct ethtool_eeprom *eeprom,
2515 u8 *data)
2516{
2517 struct bnxt *bp = netdev_priv(dev);
2518 u8 index, dir_op;
2519 u16 type, ext, ordinal, attr;
2520
2521 if (!BNXT_PF(bp)) {
2522 netdev_err(dev, "NVM write not supported from a virtual function\n");
2523 return -EINVAL;
2524 }
2525
2526 type = eeprom->magic >> 16;
2527
2528 if (type == 0xffff) { /* special value for directory operations */
2529 index = eeprom->magic & 0xff;
2530 dir_op = eeprom->magic >> 8;
2531 if (index == 0)
2532 return -EINVAL;
2533 switch (dir_op) {
2534 case 0x0e: /* erase */
2535 if (eeprom->offset != ~eeprom->magic)
2536 return -EINVAL;
2537 return bnxt_erase_nvram_directory(dev, index - 1);
2538 default:
2539 return -EINVAL;
2540 }
2541 }
2542
2543 /* Create or re-write an NVM item: */
ba425800 2544 if (bnxt_dir_type_is_executable(type))
5ac67d8b 2545 return -EOPNOTSUPP;
c0c050c5
MC
2546 ext = eeprom->magic & 0xffff;
2547 ordinal = eeprom->offset >> 16;
2548 attr = eeprom->offset & 0xffff;
2549
2550 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
2551 eeprom->len);
2552}
2553
72b34f04
MC
2554static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2555{
2556 struct bnxt *bp = netdev_priv(dev);
2557 struct ethtool_eee *eee = &bp->eee;
2558 struct bnxt_link_info *link_info = &bp->link_info;
2559 u32 advertising =
2560 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
2561 int rc = 0;
2562
c7e457f4 2563 if (!BNXT_PHY_CFG_ABLE(bp))
75362a3f 2564 return -EOPNOTSUPP;
72b34f04
MC
2565
2566 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
2567 return -EOPNOTSUPP;
2568
2569 if (!edata->eee_enabled)
2570 goto eee_ok;
2571
2572 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2573 netdev_warn(dev, "EEE requires autoneg\n");
2574 return -EINVAL;
2575 }
2576 if (edata->tx_lpi_enabled) {
2577 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
2578 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
2579 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
2580 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
2581 return -EINVAL;
2582 } else if (!bp->lpi_tmr_hi) {
2583 edata->tx_lpi_timer = eee->tx_lpi_timer;
2584 }
2585 }
2586 if (!edata->advertised) {
2587 edata->advertised = advertising & eee->supported;
2588 } else if (edata->advertised & ~advertising) {
2589 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
2590 edata->advertised, advertising);
2591 return -EINVAL;
2592 }
2593
2594 eee->advertised = edata->advertised;
2595 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
2596 eee->tx_lpi_timer = edata->tx_lpi_timer;
2597eee_ok:
2598 eee->eee_enabled = edata->eee_enabled;
2599
2600 if (netif_running(dev))
2601 rc = bnxt_hwrm_set_link_setting(bp, false, true);
2602
2603 return rc;
2604}
2605
2606static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
2607{
2608 struct bnxt *bp = netdev_priv(dev);
2609
2610 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
2611 return -EOPNOTSUPP;
2612
2613 *edata = bp->eee;
2614 if (!bp->eee.eee_enabled) {
2615 /* Preserve tx_lpi_timer so that the last value will be used
2616 * by default when it is re-enabled.
2617 */
2618 edata->advertised = 0;
2619 edata->tx_lpi_enabled = 0;
2620 }
2621
2622 if (!bp->eee.eee_active)
2623 edata->lp_advertised = 0;
2624
2625 return 0;
2626}
2627
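/* Read the SFP/QSFP module EEPROM over I2C via HWRM_PORT_PHY_I2C_READ,
 * splitting the transfer into chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes.
 */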
42ee18fe
AK
2628static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
2629 u16 page_number, u16 start_addr,
2630 u16 data_length, u8 *buf)
2631{
2632 struct hwrm_port_phy_i2c_read_input req = {0};
2633 struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
2634 int rc, byte_offset = 0;
2635
2636 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
2637 req.i2c_slave_addr = i2c_addr;
2638 req.page_number = cpu_to_le16(page_number);
2639 req.port_id = cpu_to_le16(bp->pf.port_id);
2640 do {
2641 u16 xfer_size;
2642
2643 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
2644 data_length -= xfer_size;
2645 req.page_offset = cpu_to_le16(start_addr + byte_offset);
2646 req.data_length = xfer_size;
2647 req.enables = cpu_to_le32(start_addr + byte_offset ?
2648 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
2649 mutex_lock(&bp->hwrm_cmd_lock);
2650 rc = _hwrm_send_message(bp, &req, sizeof(req),
2651 HWRM_CMD_TIMEOUT);
2652 if (!rc)
2653 memcpy(buf + byte_offset, output->data, xfer_size);
2654 mutex_unlock(&bp->hwrm_cmd_lock);
2655 byte_offset += xfer_size;
2656 } while (!rc && data_length > 0);
2657
2658 return rc;
2659}
2660
2661static int bnxt_get_module_info(struct net_device *dev,
2662 struct ethtool_modinfo *modinfo)
2663{
7328a23c 2664 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
42ee18fe 2665 struct bnxt *bp = netdev_priv(dev);
42ee18fe
AK
2666 int rc;
2667
2668 /* No point in going further if phy status indicates
2669 * module is not inserted or if it is powered down or
2670 * if it is of type 10GBase-T
2671 */
2672 if (bp->link_info.module_status >
2673 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
2674 return -EOPNOTSUPP;
2675
2676 /* This feature is not supported in older firmware versions */
2677 if (bp->hwrm_spec_code < 0x10202)
2678 return -EOPNOTSUPP;
2679
7328a23c
VV
2680 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
2681 SFF_DIAG_SUPPORT_OFFSET + 1,
2682 data);
42ee18fe 2683 if (!rc) {
7328a23c
VV
2684 u8 module_id = data[0];
2685 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
42ee18fe
AK
2686
2687 switch (module_id) {
2688 case SFF_MODULE_ID_SFP:
2689 modinfo->type = ETH_MODULE_SFF_8472;
2690 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
7328a23c
VV
2691 if (!diag_supported)
2692 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
42ee18fe
AK
2693 break;
2694 case SFF_MODULE_ID_QSFP:
2695 case SFF_MODULE_ID_QSFP_PLUS:
2696 modinfo->type = ETH_MODULE_SFF_8436;
2697 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2698 break;
2699 case SFF_MODULE_ID_QSFP28:
2700 modinfo->type = ETH_MODULE_SFF_8636;
2701 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2702 break;
2703 default:
2704 rc = -EOPNOTSUPP;
2705 break;
2706 }
2707 }
42ee18fe
AK
2708 return rc;
2709}
2710
2711static int bnxt_get_module_eeprom(struct net_device *dev,
2712 struct ethtool_eeprom *eeprom,
2713 u8 *data)
2714{
2715 struct bnxt *bp = netdev_priv(dev);
2716 u16 start = eeprom->offset, length = eeprom->len;
f3ea3119 2717 int rc = 0;
42ee18fe
AK
2718
2719 memset(data, 0, eeprom->len);
2720
2721 /* Read A0 portion of the EEPROM */
2722 if (start < ETH_MODULE_SFF_8436_LEN) {
2723 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
2724 length = ETH_MODULE_SFF_8436_LEN - start;
2725 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
2726 start, length, data);
2727 if (rc)
2728 return rc;
2729 start += length;
2730 data += length;
2731 length = eeprom->len - length;
2732 }
2733
2734 /* Read A2 portion of the EEPROM */
2735 if (length) {
2736 start -= ETH_MODULE_SFF_8436_LEN;
dea521a2
CJ
2737 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
2738 start, length, data);
42ee18fe
AK
2739 }
2740 return rc;
2741}
2742
ae8e98a6
DK
2743static int bnxt_nway_reset(struct net_device *dev)
2744{
2745 int rc = 0;
2746
2747 struct bnxt *bp = netdev_priv(dev);
2748 struct bnxt_link_info *link_info = &bp->link_info;
2749
c7e457f4 2750 if (!BNXT_PHY_CFG_ABLE(bp))
ae8e98a6
DK
2751 return -EOPNOTSUPP;
2752
2753 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
2754 return -EINVAL;
2755
2756 if (netif_running(dev))
2757 rc = bnxt_hwrm_set_link_setting(bp, true, false);
2758
2759 return rc;
2760}
2761
5ad2cbee
MC
2762static int bnxt_set_phys_id(struct net_device *dev,
2763 enum ethtool_phys_id_state state)
2764{
2765 struct hwrm_port_led_cfg_input req = {0};
2766 struct bnxt *bp = netdev_priv(dev);
2767 struct bnxt_pf_info *pf = &bp->pf;
2768 struct bnxt_led_cfg *led_cfg;
2769 u8 led_state;
2770 __le16 duration;
9f90445c 2771 int i;
5ad2cbee
MC
2772
2773 if (!bp->num_leds || BNXT_VF(bp))
2774 return -EOPNOTSUPP;
2775
2776 if (state == ETHTOOL_ID_ACTIVE) {
2777 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
2778 duration = cpu_to_le16(500);
2779 } else if (state == ETHTOOL_ID_INACTIVE) {
2780 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
2781 duration = cpu_to_le16(0);
2782 } else {
2783 return -EINVAL;
2784 }
2785 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
2786 req.port_id = cpu_to_le16(pf->port_id);
2787 req.num_leds = bp->num_leds;
2788 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2789 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2790 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2791 led_cfg->led_id = bp->leds[i].led_id;
2792 led_cfg->led_state = led_state;
2793 led_cfg->led_blink_on = duration;
2794 led_cfg->led_blink_off = duration;
2795 led_cfg->led_group_id = bp->leds[i].led_group_id;
2796 }
9f90445c 2797 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5ad2cbee
MC
2798}
2799
67fea463
MC
2800static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
2801{
2802 struct hwrm_selftest_irq_input req = {0};
2803
2804 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
2805 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2806}
2807
2808static int bnxt_test_irq(struct bnxt *bp)
2809{
2810 int i;
2811
2812 for (i = 0; i < bp->cp_nr_rings; i++) {
2813 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
2814 int rc;
2815
2816 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
2817 if (rc)
2818 return rc;
2819 }
2820 return 0;
2821}
2822
f7dc1ea6
MC
2823static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
2824{
2825 struct hwrm_port_mac_cfg_input req = {0};
2826
2827 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
2828
2829 req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
2830 if (enable)
2831 req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
2832 else
2833 req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
2834 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2835}
2836
56d37462
VV
2837static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
2838{
2839 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2840 struct hwrm_port_phy_qcaps_input req = {0};
2841 int rc;
2842
2843 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
2844 mutex_lock(&bp->hwrm_cmd_lock);
2845 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2846 if (!rc)
2847 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
2848
2849 mutex_unlock(&bp->hwrm_cmd_lock);
2850 return rc;
2851}
2852
91725d89
MC
2853static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
2854 struct hwrm_port_phy_cfg_input *req)
2855{
2856 struct bnxt_link_info *link_info = &bp->link_info;
56d37462 2857 u16 fw_advertising;
91725d89
MC
2858 u16 fw_speed;
2859 int rc;
2860
8a60efd1
MC
2861 if (!link_info->autoneg ||
2862 (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
91725d89
MC
2863 return 0;
2864
56d37462
VV
2865 rc = bnxt_query_force_speeds(bp, &fw_advertising);
2866 if (rc)
2867 return rc;
2868
91725d89 2869 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
83d8f5e9 2870 if (bp->link_info.link_up)
91725d89
MC
2871 fw_speed = bp->link_info.link_speed;
2872 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
2873 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
2874 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
2875 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
2876 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
2877 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
2878 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
2879 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
2880
2881 req->force_link_speed = cpu_to_le16(fw_speed);
2882 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
2883 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2884 rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
2885 req->flags = 0;
2886 req->force_link_speed = cpu_to_le16(0);
2887 return rc;
2888}
2889
55fd0cf3 2890static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
91725d89
MC
2891{
2892 struct hwrm_port_phy_cfg_input req = {0};
2893
2894 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
2895
2896 if (enable) {
2897 bnxt_disable_an_for_lpbk(bp, &req);
55fd0cf3
MC
2898 if (ext)
2899 req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
2900 else
2901 req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
91725d89
MC
2902 } else {
2903 req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
2904 }
2905 req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
2906 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2907}
2908
e44758b7 2909static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
f7dc1ea6
MC
2910 u32 raw_cons, int pkt_size)
2911{
e44758b7
MC
2912 struct bnxt_napi *bnapi = cpr->bnapi;
2913 struct bnxt_rx_ring_info *rxr;
f7dc1ea6
MC
2914 struct bnxt_sw_rx_bd *rx_buf;
2915 struct rx_cmp *rxcmp;
2916 u16 cp_cons, cons;
2917 u8 *data;
2918 u32 len;
2919 int i;
2920
e44758b7 2921 rxr = bnapi->rx_ring;
f7dc1ea6
MC
2922 cp_cons = RING_CMP(raw_cons);
2923 rxcmp = (struct rx_cmp *)
2924 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2925 cons = rxcmp->rx_cmp_opaque;
2926 rx_buf = &rxr->rx_buf_ring[cons];
2927 data = rx_buf->data_ptr;
2928 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
2929 if (len != pkt_size)
2930 return -EIO;
2931 i = ETH_ALEN;
2932 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
2933 return -EIO;
2934 i += ETH_ALEN;
2935 for ( ; i < pkt_size; i++) {
2936 if (data[i] != (u8)(i & 0xff))
2937 return -EIO;
2938 }
2939 return 0;
2940}
2941
e44758b7
MC
2942static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2943 int pkt_size)
f7dc1ea6 2944{
f7dc1ea6
MC
2945 struct tx_cmp *txcmp;
2946 int rc = -EIO;
2947 u32 raw_cons;
2948 u32 cons;
2949 int i;
2950
f7dc1ea6
MC
2951 raw_cons = cpr->cp_raw_cons;
2952 for (i = 0; i < 200; i++) {
2953 cons = RING_CMP(raw_cons);
2954 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2955
2956 if (!TX_CMP_VALID(txcmp, raw_cons)) {
2957 udelay(5);
2958 continue;
2959 }
2960
2961 /* The valid test of the entry must be done first before
2962 * reading any further.
2963 */
2964 dma_rmb();
2965 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
e44758b7 2966 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
f7dc1ea6
MC
2967 raw_cons = NEXT_RAW_CMP(raw_cons);
2968 raw_cons = NEXT_RAW_CMP(raw_cons);
2969 break;
2970 }
2971 raw_cons = NEXT_RAW_CMP(raw_cons);
2972 }
2973 cpr->cp_raw_cons = raw_cons;
2974 return rc;
2975}
2976
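/* Transmit one self-describing test frame (broadcast destination, our
 * MAC address as source, then an incrementing byte pattern) on TX
 * ring 0 and poll the completion ring for the looped-back copy.
 */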
2977static int bnxt_run_loopback(struct bnxt *bp)
2978{
2979 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
84404d5f 2980 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
e44758b7 2981 struct bnxt_cp_ring_info *cpr;
f7dc1ea6
MC
2982 int pkt_size, i = 0;
2983 struct sk_buff *skb;
2984 dma_addr_t map;
2985 u8 *data;
2986 int rc;
2987
84404d5f
MC
2988 cpr = &rxr->bnapi->cp_ring;
2989 if (bp->flags & BNXT_FLAG_CHIP_P5)
2990 cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
f7dc1ea6
MC
2991 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
2992 skb = netdev_alloc_skb(bp->dev, pkt_size);
2993 if (!skb)
2994 return -ENOMEM;
2995 data = skb_put(skb, pkt_size);
2996 eth_broadcast_addr(data);
2997 i += ETH_ALEN;
2998 ether_addr_copy(&data[i], bp->dev->dev_addr);
2999 i += ETH_ALEN;
3000 for ( ; i < pkt_size; i++)
3001 data[i] = (u8)(i & 0xff);
3002
3003 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
3004 PCI_DMA_TODEVICE);
3005 if (dma_mapping_error(&bp->pdev->dev, map)) {
3006 dev_kfree_skb(skb);
3007 return -EIO;
3008 }
c1ba92a8 3009 bnxt_xmit_bd(bp, txr, map, pkt_size);
f7dc1ea6
MC
3010
3011 /* Sync BD data before updating doorbell */
3012 wmb();
3013
697197e5 3014 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
e44758b7 3015 rc = bnxt_poll_loopback(bp, cpr, pkt_size);
f7dc1ea6
MC
3016
3017 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
3018 dev_kfree_skb(skb);
3019 return rc;
3020}
3021
eb513658
MC
3022static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
3023{
3024 struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
3025 struct hwrm_selftest_exec_input req = {0};
3026 int rc;
3027
3028 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
3029 mutex_lock(&bp->hwrm_cmd_lock);
3030 resp->test_success = 0;
3031 req.flags = test_mask;
3032 rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
3033 *test_results = resp->test_success;
3034 mutex_unlock(&bp->hwrm_cmd_lock);
3035 return rc;
3036}
3037
55fd0cf3 3038#define BNXT_DRV_TESTS 4
f7dc1ea6 3039#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
91725d89 3040#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
55fd0cf3
MC
3041#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
3042#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
eb513658
MC
3043
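/* ethtool self-test entry point.  When run online, only the firmware
 * tests that do not require the interface to be offline are executed.
 * Offline runs close the NIC, execute the firmware tests plus MAC, PHY
 * and (optionally) external loopbacks in half-open mode, then reopen
 * the NIC.  The IRQ test runs in both cases.
 */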
3044static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
3045 u64 *buf)
3046{
3047 struct bnxt *bp = netdev_priv(dev);
55fd0cf3 3048 bool do_ext_lpbk = false;
eb513658
MC
3049 bool offline = false;
3050 u8 test_results = 0;
3051 u8 test_mask = 0;
d27e2ca1 3052 int rc = 0, i;
eb513658
MC
3053
3054 if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
3055 return;
3056 memset(buf, 0, sizeof(u64) * bp->num_tests);
3057 if (!netif_running(dev)) {
3058 etest->flags |= ETH_TEST_FL_FAILED;
3059 return;
3060 }
3061
55fd0cf3
MC
3062 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
3063 (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
3064 do_ext_lpbk = true;
3065
eb513658
MC
3066 if (etest->flags & ETH_TEST_FL_OFFLINE) {
3067 if (bp->pf.active_vfs) {
3068 etest->flags |= ETH_TEST_FL_FAILED;
3069 netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
3070 return;
3071 }
3072 offline = true;
3073 }
3074
3075 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3076 u8 bit_val = 1 << i;
3077
3078 if (!(bp->test_info->offline_mask & bit_val))
3079 test_mask |= bit_val;
3080 else if (offline)
3081 test_mask |= bit_val;
3082 }
3083 if (!offline) {
3084 bnxt_run_fw_tests(bp, test_mask, &test_results);
3085 } else {
3086 rc = bnxt_close_nic(bp, false, false);
3087 if (rc)
3088 return;
3089 bnxt_run_fw_tests(bp, test_mask, &test_results);
f7dc1ea6
MC
3090
3091 buf[BNXT_MACLPBK_TEST_IDX] = 1;
3092 bnxt_hwrm_mac_loopback(bp, true);
3093 msleep(250);
3094 rc = bnxt_half_open_nic(bp);
3095 if (rc) {
3096 bnxt_hwrm_mac_loopback(bp, false);
3097 etest->flags |= ETH_TEST_FL_FAILED;
3098 return;
3099 }
3100 if (bnxt_run_loopback(bp))
3101 etest->flags |= ETH_TEST_FL_FAILED;
3102 else
3103 buf[BNXT_MACLPBK_TEST_IDX] = 0;
3104
f7dc1ea6 3105 bnxt_hwrm_mac_loopback(bp, false);
55fd0cf3 3106 bnxt_hwrm_phy_loopback(bp, true, false);
91725d89
MC
3107 msleep(1000);
3108 if (bnxt_run_loopback(bp)) {
3109 buf[BNXT_PHYLPBK_TEST_IDX] = 1;
3110 etest->flags |= ETH_TEST_FL_FAILED;
3111 }
55fd0cf3
MC
3112 if (do_ext_lpbk) {
3113 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
3114 bnxt_hwrm_phy_loopback(bp, true, true);
3115 msleep(1000);
3116 if (bnxt_run_loopback(bp)) {
3117 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
3118 etest->flags |= ETH_TEST_FL_FAILED;
3119 }
3120 }
3121 bnxt_hwrm_phy_loopback(bp, false, false);
91725d89 3122 bnxt_half_close_nic(bp);
d27e2ca1 3123 rc = bnxt_open_nic(bp, false, true);
eb513658 3124 }
d27e2ca1 3125 if (rc || bnxt_test_irq(bp)) {
67fea463
MC
3126 buf[BNXT_IRQ_TEST_IDX] = 1;
3127 etest->flags |= ETH_TEST_FL_FAILED;
3128 }
eb513658
MC
3129 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3130 u8 bit_val = 1 << i;
3131
3132 if ((test_mask & bit_val) && !(test_results & bit_val)) {
3133 buf[i] = 1;
3134 etest->flags |= ETH_TEST_FL_FAILED;
3135 }
3136 }
3137}
3138
49f7972f
VV
3139static int bnxt_reset(struct net_device *dev, u32 *flags)
3140{
3141 struct bnxt *bp = netdev_priv(dev);
8cec0940 3142 bool reload = false;
7a13240e
EP
3143 u32 req = *flags;
3144
3145 if (!req)
3146 return -EINVAL;
49f7972f
VV
3147
3148 if (!BNXT_PF(bp)) {
3149 netdev_err(dev, "Reset is not supported from a VF\n");
3150 return -EOPNOTSUPP;
3151 }
3152
0a3f4e4f
VV
3153 if (pci_vfs_assigned(bp->pdev) &&
3154 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
49f7972f
VV
3155 netdev_err(dev,
3156 "Reset not allowed when VFs are assigned to VMs\n");
3157 return -EBUSY;
3158 }
3159
7a13240e 3160 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
49f7972f 3161 /* This feature is not supported in older firmware versions */
7a13240e
EP
3162 if (bp->hwrm_spec_code >= 0x10803) {
3163 if (!bnxt_firmware_reset_chip(dev)) {
3164 netdev_info(dev, "Firmware reset request successful.\n");
3165 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
8cec0940 3166 reload = true;
7a13240e
EP
3167 *flags &= ~BNXT_FW_RESET_CHIP;
3168 }
3169 } else if (req == BNXT_FW_RESET_CHIP) {
3170 return -EOPNOTSUPP; /* only request, fail hard */
2373d8d6 3171 }
7a13240e 3172 }
6502ad59 3173
7a13240e
EP
3174 if (req & BNXT_FW_RESET_AP) {
3175 /* This feature is not supported in older firmware versions */
3176 if (bp->hwrm_spec_code >= 0x10803) {
3177 if (!bnxt_firmware_reset_ap(dev)) {
3178 netdev_info(dev, "Reset application processor successful.\n");
8cec0940 3179 reload = true;
7a13240e
EP
3180 *flags &= ~BNXT_FW_RESET_AP;
3181 }
3182 } else if (req == BNXT_FW_RESET_AP) {
3183 return -EOPNOTSUPP; /* only request, fail hard */
2373d8d6 3184 }
49f7972f
VV
3185 }
3186
8cec0940
EP
3187 if (reload)
3188 netdev_info(dev, "Reload driver to complete reset\n");
3189
7a13240e 3190 return 0;
49f7972f
VV
3191}
3192
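/* Helper for the HWRM_DBG_COREDUMP_* commands: resend the request with
 * an incrementing sequence number and copy each DMA'd chunk into
 * info->dest_buf until the firmware clears the "more" flag.  For the
 * LIST command, the first response also provides the segment count.
 */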
6c5657d0
VV
3193static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
3194 struct bnxt_hwrm_dbg_dma_info *info)
3195{
3196 struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
3197 struct hwrm_dbg_cmn_input *cmn_req = msg;
3198 __le16 *seq_ptr = msg + info->seq_off;
3199 u16 seq = 0, len, segs_off;
3200 void *resp = cmn_resp;
3201 dma_addr_t dma_handle;
3202 int rc, off = 0;
3203 void *dma_buf;
3204
3205 dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
3206 GFP_KERNEL);
3207 if (!dma_buf)
3208 return -ENOMEM;
3209
3210 segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
3211 total_segments);
3212 cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
3213 cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
3214 mutex_lock(&bp->hwrm_cmd_lock);
3215 while (1) {
3216 *seq_ptr = cpu_to_le16(seq);
5b306bde
VV
3217 rc = _hwrm_send_message(bp, msg, msg_len,
3218 HWRM_COREDUMP_TIMEOUT);
6c5657d0
VV
3219 if (rc)
3220 break;
3221
3222 len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
3223 if (!seq &&
3224 cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
3225 info->segs = le16_to_cpu(*((__le16 *)(resp +
3226 segs_off)));
3227 if (!info->segs) {
3228 rc = -EIO;
3229 break;
3230 }
3231
3232 info->dest_buf_size = info->segs *
3233 sizeof(struct coredump_segment_record);
3234 info->dest_buf = kmalloc(info->dest_buf_size,
3235 GFP_KERNEL);
3236 if (!info->dest_buf) {
3237 rc = -ENOMEM;
3238 break;
3239 }
3240 }
3241
c74751f4
VV
3242 if (info->dest_buf) {
3243 if ((info->seg_start + off + len) <=
3244 BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
3245 memcpy(info->dest_buf + off, dma_buf, len);
3246 } else {
3247 rc = -ENOBUFS;
3248 break;
3249 }
3250 }
6c5657d0
VV
3251
3252 if (cmn_req->req_type ==
3253 cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
3254 info->dest_buf_size += len;
3255
3256 if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
3257 break;
3258
3259 seq++;
3260 off += len;
3261 }
3262 mutex_unlock(&bp->hwrm_cmd_lock);
3263 dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
3264 return rc;
3265}
3266
3267static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
3268 struct bnxt_coredump *coredump)
3269{
3270 struct hwrm_dbg_coredump_list_input req = {0};
3271 struct bnxt_hwrm_dbg_dma_info info = {NULL};
3272 int rc;
3273
3274 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
3275
3276 info.dma_len = COREDUMP_LIST_BUF_LEN;
3277 info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
3278 info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
3279 data_len);
3280
3281 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
3282 if (!rc) {
3283 coredump->data = info.dest_buf;
3284 coredump->data_size = info.dest_buf_size;
3285 coredump->total_segs = info.segs;
3286 }
3287 return rc;
3288}
3289
3290static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
3291 u16 segment_id)
3292{
3293 struct hwrm_dbg_coredump_initiate_input req = {0};
3294
3295 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
3296 req.component_id = cpu_to_le16(component_id);
3297 req.segment_id = cpu_to_le16(segment_id);
3298
57a8730b 3299 return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
6c5657d0
VV
3300}
3301
3302static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
3303 u16 segment_id, u32 *seg_len,
c74751f4 3304 void *buf, u32 buf_len, u32 offset)
6c5657d0
VV
3305{
3306 struct hwrm_dbg_coredump_retrieve_input req = {0};
3307 struct bnxt_hwrm_dbg_dma_info info = {NULL};
3308 int rc;
3309
3310 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
3311 req.component_id = cpu_to_le16(component_id);
3312 req.segment_id = cpu_to_le16(segment_id);
3313
3314 info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
3315 info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
3316 seq_no);
3317 info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
3318 data_len);
c74751f4 3319 if (buf) {
6c5657d0 3320 info.dest_buf = buf + offset;
c74751f4
VV
3321 info.buf_len = buf_len;
3322 info.seg_start = offset;
3323 }
6c5657d0
VV
3324
3325 rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
3326 if (!rc)
3327 *seg_len = info.dest_buf_size;
3328
3329 return rc;
3330}
3331
3332static void
3333bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
3334 struct bnxt_coredump_segment_hdr *seg_hdr,
3335 struct coredump_segment_record *seg_rec, u32 seg_len,
3336 int status, u32 duration, u32 instance)
3337{
3338 memset(seg_hdr, 0, sizeof(*seg_hdr));
8605212a 3339 memcpy(seg_hdr->signature, "sEgM", 4);
6c5657d0
VV
3340 if (seg_rec) {
3341 seg_hdr->component_id = (__force __le32)seg_rec->component_id;
3342 seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
3343 seg_hdr->low_version = seg_rec->version_low;
3344 seg_hdr->high_version = seg_rec->version_hi;
3345 } else {
3346 /* For hwrm_ver_get response Component id = 2
3347 * and Segment id = 0
3348 */
3349 seg_hdr->component_id = cpu_to_le32(2);
3350 seg_hdr->segment_id = 0;
3351 }
3352 seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
3353 seg_hdr->length = cpu_to_le32(seg_len);
3354 seg_hdr->status = cpu_to_le32(status);
3355 seg_hdr->duration = cpu_to_le32(duration);
3356 seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
3357 seg_hdr->instance = cpu_to_le32(instance);
3358}
3359
3360static void
3361bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
3362 time64_t start, s16 start_utc, u16 total_segs,
3363 int status)
3364{
3365 time64_t end = ktime_get_real_seconds();
3366 u32 os_ver_major = 0, os_ver_minor = 0;
3367 struct tm tm;
3368
3369 time64_to_tm(start, 0, &tm);
3370 memset(record, 0, sizeof(*record));
8605212a 3371 memcpy(record->signature, "cOrE", 4);
6c5657d0
VV
3372 record->flags = 0;
3373 record->low_version = 0;
3374 record->high_version = 1;
3375 record->asic_state = 0;
3d46eee5
AB
3376 strlcpy(record->system_name, utsname()->nodename,
3377 sizeof(record->system_name));
8dc5ae2d
VV
3378 record->year = cpu_to_le16(tm.tm_year + 1900);
3379 record->month = cpu_to_le16(tm.tm_mon + 1);
6c5657d0
VV
3380 record->day = cpu_to_le16(tm.tm_mday);
3381 record->hour = cpu_to_le16(tm.tm_hour);
3382 record->minute = cpu_to_le16(tm.tm_min);
3383 record->second = cpu_to_le16(tm.tm_sec);
3384 record->utc_bias = cpu_to_le16(start_utc);
3385 strcpy(record->commandline, "ethtool -w");
3386 record->total_segments = cpu_to_le32(total_segs);
3387
3388 sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
3389 record->os_ver_major = cpu_to_le32(os_ver_major);
3390 record->os_ver_minor = cpu_to_le32(os_ver_minor);
3391
8605212a 3392 strlcpy(record->os_name, utsname()->sysname, 32);
6c5657d0
VV
3393 time64_to_tm(end, 0, &tm);
3394 record->end_year = cpu_to_le16(tm.tm_year + 1900);
3395 record->end_month = cpu_to_le16(tm.tm_mon + 1);
3396 record->end_day = cpu_to_le16(tm.tm_mday);
3397 record->end_hour = cpu_to_le16(tm.tm_hour);
3398 record->end_minute = cpu_to_le16(tm.tm_min);
3399 record->end_second = cpu_to_le16(tm.tm_sec);
3400 record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
3401 record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
3402 bp->ver_resp.chip_rev << 8 |
3403 bp->ver_resp.chip_metal);
3404 record->asic_id2 = 0;
3405 record->coredump_status = cpu_to_le32(status);
3406 record->ioctl_low_version = 0;
3407 record->ioctl_high_version = 0;
3408}
3409
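/* Build the coredump image: a segment header plus the HWRM_VER_GET
 * response, followed by a header and data for every firmware-listed
 * segment, terminated by a bnxt_coredump_record.  When buf is NULL the
 * segments are still walked but only the total length is returned in
 * *dump_len.
 */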
3410static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
3411{
3412 u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
c74751f4 3413 u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
6c5657d0 3414 struct coredump_segment_record *seg_record = NULL;
6c5657d0 3415 struct bnxt_coredump_segment_hdr seg_hdr;
6c5657d0
VV
3416 struct bnxt_coredump coredump = {NULL};
3417 time64_t start_time;
3418 u16 start_utc;
3419 int rc = 0, i;
3420
c74751f4
VV
3421 if (buf)
3422 buf_len = *dump_len;
3423
6c5657d0
VV
3424 start_time = ktime_get_real_seconds();
3425 start_utc = sys_tz.tz_minuteswest * 60;
3426 seg_hdr_len = sizeof(seg_hdr);
3427
3428 /* First segment should be hwrm_ver_get response */
3429 *dump_len = seg_hdr_len + ver_get_resp_len;
3430 if (buf) {
3431 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
3432 0, 0, 0);
3433 memcpy(buf + offset, &seg_hdr, seg_hdr_len);
3434 offset += seg_hdr_len;
3435 memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
3436 offset += ver_get_resp_len;
3437 }
3438
3439 rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
3440 if (rc) {
3441 netdev_err(bp->dev, "Failed to get coredump segment list\n");
3442 goto err;
3443 }
3444
3445 *dump_len += seg_hdr_len * coredump.total_segs;
3446
3447 seg_record = (struct coredump_segment_record *)coredump.data;
3448 seg_record_len = sizeof(*seg_record);
3449
3450 for (i = 0; i < coredump.total_segs; i++) {
3451 u16 comp_id = le16_to_cpu(seg_record->component_id);
3452 u16 seg_id = le16_to_cpu(seg_record->segment_id);
3453 u32 duration = 0, seg_len = 0;
3454 unsigned long start, end;
3455
c74751f4
VV
3456 if (buf && ((offset + seg_hdr_len) >
3457 BNXT_COREDUMP_BUF_LEN(buf_len))) {
3458 rc = -ENOBUFS;
3459 goto err;
3460 }
3461
6c5657d0
VV
3462 start = jiffies;
3463
3464 rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
3465 if (rc) {
3466 netdev_err(bp->dev,
3467 "Failed to initiate coredump for seg = %d\n",
3468 seg_record->segment_id);
3469 goto next_seg;
3470 }
3471
3472 /* Write segment data into the buffer */
3473 rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
c74751f4 3474 &seg_len, buf, buf_len,
6c5657d0 3475 offset + seg_hdr_len);
c74751f4
VV
3476 if (rc && rc == -ENOBUFS)
3477 goto err;
3478 else if (rc)
6c5657d0
VV
3479 netdev_err(bp->dev,
3480 "Failed to retrieve coredump for seg = %d\n",
3481 seg_record->segment_id);
3482
3483next_seg:
3484 end = jiffies;
3485 duration = jiffies_to_msecs(end - start);
3486 bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
3487 rc, duration, 0);
3488
3489 if (buf) {
3490 /* Write segment header into the buffer */
3491 memcpy(buf + offset, &seg_hdr, seg_hdr_len);
3492 offset += seg_hdr_len + seg_len;
3493 }
3494
3495 *dump_len += seg_len;
3496 seg_record =
3497 (struct coredump_segment_record *)((u8 *)seg_record +
3498 seg_record_len);
3499 }
3500
3501err:
3502 if (buf)
3503 bnxt_fill_coredump_record(bp, buf + offset, start_time,
3504 start_utc, coredump.total_segs + 1,
3505 rc);
6c5657d0 3506 kfree(coredump.data);
1bbf3aed 3507 *dump_len += sizeof(struct bnxt_coredump_record);
c74751f4 3508 if (rc == -ENOBUFS)
9a005c38 3509 netdev_err(bp->dev, "Firmware coredump is larger than the provided buffer\n");
3510 return rc;
3511}
3512
3513static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
3514{
3515 struct bnxt *bp = netdev_priv(dev);
3516
3517 if (dump->flag > BNXT_DUMP_CRASH) {
3518 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
3519 return -EINVAL;
3520 }
3521
3522 if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
3523 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
3524 return -EOPNOTSUPP;
3525 }
3526
3527 bp->dump_flag = dump->flag;
3528 return 0;
3529}
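/* Example (hedged sketch, hypothetical interface name) of selecting the dump
 * type with the standard ethtool set-dump command, which lands in
 * bnxt_set_dump() above:
 *
 *	ethtool -W eth0 0	# BNXT_DUMP_LIVE: dump of the running firmware
 *	ethtool -W eth0 1	# BNXT_DUMP_CRASH: requires CONFIG_TEE_BNXT_FW
 */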
3530
3531static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3532{
3533 struct bnxt *bp = netdev_priv(dev);
3534
3535 if (bp->hwrm_spec_code < 0x10801)
3536 return -EOPNOTSUPP;
3537
3538 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3539 bp->ver_resp.hwrm_fw_min_8b << 16 |
3540 bp->ver_resp.hwrm_fw_bld_8b << 8 |
3541 bp->ver_resp.hwrm_fw_rsvd_8b;
3542
3543 dump->flag = bp->dump_flag;
3544 if (bp->dump_flag == BNXT_DUMP_CRASH)
3545 dump->len = BNXT_CRASH_DUMP_LEN;
3546 else
3547 bnxt_get_coredump(bp, NULL, &dump->len);
3548 return 0;
3549}
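/* dump->version above packs the HWRM firmware version as
 * <maj:8><min:8><bld:8><rsvd:8>; e.g. a hypothetical firmware 216.1.2.0 is
 * reported as 0xd8010200.
 */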
3550
3551static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3552 void *buf)
3553{
3554 struct bnxt *bp = netdev_priv(dev);
3555
3556 if (bp->hwrm_spec_code < 0x10801)
3557 return -EOPNOTSUPP;
3558
3559 memset(buf, 0, dump->len);
3560
3561 dump->flag = bp->dump_flag;
3562 if (dump->flag == BNXT_DUMP_CRASH) {
3563#ifdef CONFIG_TEE_BNXT_FW
3564 return tee_bnxt_copy_coredump(buf, 0, dump->len);
3565#endif
3566 } else {
3567 return bnxt_get_coredump(bp, buf, &dump->len);
3568 }
3569
3570 return 0;
3571}
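/* Retrieval flow (hedged sketch, hypothetical interface and file names),
 * assuming the standard ethtool get-dump command: ETHTOOL_GET_DUMP_FLAG is
 * handled by bnxt_get_dump_flag() above to size the buffer, then
 * ETHTOOL_GET_DUMP_DATA by bnxt_get_dump_data() to copy the dump out:
 *
 *	ethtool -w eth0			# report flag, version and length
 *	ethtool -w eth0 data core.dump	# write the dump to core.dump
 */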
3572
3573void bnxt_ethtool_init(struct bnxt *bp)
3574{
3575 struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
3576 struct hwrm_selftest_qlist_input req = {0};
3577 struct bnxt_test_info *test_info;
431aa1eb 3578 struct net_device *dev = bp->dev;
3579 int i, rc;
3580
3581 if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
3582 bnxt_get_pkgver(dev);
431aa1eb 3583
ba642ab7 3584 bp->num_tests = 0;
3585 if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
3586 return;
3587
3588 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
3589 mutex_lock(&bp->hwrm_cmd_lock);
3590 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3591 if (rc)
3592 goto ethtool_init_exit;
3593
3594 test_info = bp->test_info;
3595 if (!test_info)
3596 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
3597 if (!test_info)
3598 goto ethtool_init_exit;
3599
3600 bp->test_info = test_info;
3601 bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
3602 if (bp->num_tests > BNXT_MAX_TEST)
3603 bp->num_tests = BNXT_MAX_TEST;
3604
3605 test_info->offline_mask = resp->offline_tests;
3606 test_info->timeout = le16_to_cpu(resp->test_timeout);
3607 if (!test_info->timeout)
3608 test_info->timeout = HWRM_CMD_TIMEOUT;
3609 for (i = 0; i < bp->num_tests; i++) {
3610 char *str = test_info->string[i];
3611 char *fw_str = resp->test0_name + i * 32;
3612
3613 if (i == BNXT_MACLPBK_TEST_IDX) {
3614 strcpy(str, "Mac loopback test (offline)");
3615 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
3616 strcpy(str, "Phy loopback test (offline)");
3617 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
3618 strcpy(str, "Ext loopback test (offline)");
3619 } else if (i == BNXT_IRQ_TEST_IDX) {
3620 strcpy(str, "Interrupt test (offline)");
3621 } else {
3622 strlcpy(str, fw_str, ETH_GSTRING_LEN);
3623 strncat(str, " test", ETH_GSTRING_LEN - strlen(str) - 1);
3624 if (test_info->offline_mask & (1 << i))
3625 strncat(str, " (offline)",
3626 ETH_GSTRING_LEN - strlen(str) - 1);
3627 else
3628 strncat(str, " (online)",
3629 ETH_GSTRING_LEN - strlen(str) - 1);
3630 }
3631 }
3632
3633ethtool_init_exit:
3634 mutex_unlock(&bp->hwrm_cmd_lock);
3635}
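/* Example of the test strings built above (hypothetical firmware-provided
 * name): a firmware test reported as "NVRAM" whose bit is set in
 * offline_tests shows up in ethtool self-test output as
 * "NVRAM test (offline)", otherwise as "NVRAM test (online)".  The
 * driver-added loopback and interrupt tests use the fixed strings above.
 */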
3636
3637void bnxt_ethtool_free(struct bnxt *bp)
3638{
3639 kfree(bp->test_info);
3640 bp->test_info = NULL;
3641}
3642
c0c050c5 3643const struct ethtool_ops bnxt_ethtool_ops = {
3644 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3645 ETHTOOL_COALESCE_MAX_FRAMES |
3646 ETHTOOL_COALESCE_USECS_IRQ |
3647 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
3648 ETHTOOL_COALESCE_STATS_BLOCK_USECS |
3649 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
3650 .get_link_ksettings = bnxt_get_link_ksettings,
3651 .set_link_ksettings = bnxt_set_link_ksettings,
3652 .get_pauseparam = bnxt_get_pauseparam,
3653 .set_pauseparam = bnxt_set_pauseparam,
3654 .get_drvinfo = bnxt_get_drvinfo,
3655 .get_regs_len = bnxt_get_regs_len,
3656 .get_regs = bnxt_get_regs,
8e202366 3657 .get_wol = bnxt_get_wol,
5282db6c 3658 .set_wol = bnxt_set_wol,
3659 .get_coalesce = bnxt_get_coalesce,
3660 .set_coalesce = bnxt_set_coalesce,
3661 .get_msglevel = bnxt_get_msglevel,
3662 .set_msglevel = bnxt_set_msglevel,
3663 .get_sset_count = bnxt_get_sset_count,
3664 .get_strings = bnxt_get_strings,
3665 .get_ethtool_stats = bnxt_get_ethtool_stats,
3666 .set_ringparam = bnxt_set_ringparam,
3667 .get_ringparam = bnxt_get_ringparam,
3668 .get_channels = bnxt_get_channels,
3669 .set_channels = bnxt_set_channels,
c0c050c5 3670 .get_rxnfc = bnxt_get_rxnfc,
a011952a 3671 .set_rxnfc = bnxt_set_rxnfc,
3672 .get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
3673 .get_rxfh_key_size = bnxt_get_rxfh_key_size,
3674 .get_rxfh = bnxt_get_rxfh,
bd3191b5 3675 .set_rxfh = bnxt_set_rxfh,
3676 .flash_device = bnxt_flash_device,
3677 .get_eeprom_len = bnxt_get_eeprom_len,
3678 .get_eeprom = bnxt_get_eeprom,
3679 .set_eeprom = bnxt_set_eeprom,
3680 .get_link = bnxt_get_link,
3681 .get_eee = bnxt_get_eee,
3682 .set_eee = bnxt_set_eee,
3683 .get_module_info = bnxt_get_module_info,
3684 .get_module_eeprom = bnxt_get_module_eeprom,
3685 .nway_reset = bnxt_nway_reset,
3686 .set_phys_id = bnxt_set_phys_id,
eb513658 3687 .self_test = bnxt_self_test,
49f7972f 3688 .reset = bnxt_reset,
0b0eacf3 3689 .set_dump = bnxt_set_dump,
3690 .get_dump_flag = bnxt_get_dump_flag,
3691 .get_dump_data = bnxt_get_dump_data,
c0c050c5 3692};
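/* A minimal sketch of how this ops table is typically wired up; in this
 * driver the assignment lives in the probe path in bnxt.c (shown here only
 * for illustration):
 *
 *	dev->ethtool_ops = &bnxt_ethtool_ops;
 */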