drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2017 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/ctype.h>
12 #include <linux/stringify.h>
13 #include <linux/ethtool.h>
14 #include <linux/interrupt.h>
15 #include <linux/pci.h>
16 #include <linux/etherdevice.h>
17 #include <linux/crc32.h>
18 #include <linux/firmware.h>
19 #include "bnxt_hsi.h"
20 #include "bnxt.h"
21 #include "bnxt_xdp.h"
22 #include "bnxt_ethtool.h"
23 #include "bnxt_nvm_defs.h"      /* NVRAM content constants and structure defs */
24 #include "bnxt_fw_hdr.h"        /* Firmware hdr constants and structure defs */
25 #define FLASH_NVRAM_TIMEOUT     ((HWRM_CMD_TIMEOUT) * 100)
26 #define FLASH_PACKAGE_TIMEOUT   ((HWRM_CMD_TIMEOUT) * 200)
27 #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
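/* NVRAM and firmware-package flash operations can run much longer than an
 * ordinary HWRM command, so the timeouts above scale the base
 * HWRM_CMD_TIMEOUT by a factor of 100-200 before the request is failed.
 */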
28
29 static u32 bnxt_get_msglevel(struct net_device *dev)
30 {
31         struct bnxt *bp = netdev_priv(dev);
32
33         return bp->msg_enable;
34 }
35
36 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
37 {
38         struct bnxt *bp = netdev_priv(dev);
39
40         bp->msg_enable = value;
41 }
42
43 static int bnxt_get_coalesce(struct net_device *dev,
44                              struct ethtool_coalesce *coal)
45 {
46         struct bnxt *bp = netdev_priv(dev);
47         struct bnxt_coal *hw_coal;
48         u16 mult;
49
50         memset(coal, 0, sizeof(*coal));
51
52         coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
53
54         hw_coal = &bp->rx_coal;
55         mult = hw_coal->bufs_per_record;
56         coal->rx_coalesce_usecs = hw_coal->coal_ticks;
57         coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
58         coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
59         coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
60
61         hw_coal = &bp->tx_coal;
62         mult = hw_coal->bufs_per_record;
63         coal->tx_coalesce_usecs = hw_coal->coal_ticks;
64         coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
65         coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
66         coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
67
68         coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
69
70         return 0;
71 }
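/* The ethtool *_coalesce_usecs fields map directly onto coal_ticks, while
 * the *_max_coalesced_frames fields are divided by the per-ring
 * bufs_per_record multiplier; bnxt_set_coalesce() below applies the same
 * conversion in the opposite direction.
 */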
72
73 static int bnxt_set_coalesce(struct net_device *dev,
74                              struct ethtool_coalesce *coal)
75 {
76         struct bnxt *bp = netdev_priv(dev);
77         bool update_stats = false;
78         struct bnxt_coal *hw_coal;
79         int rc = 0;
80         u16 mult;
81
82         if (coal->use_adaptive_rx_coalesce) {
83                 bp->flags |= BNXT_FLAG_DIM;
84         } else {
85                 if (bp->flags & BNXT_FLAG_DIM) {
86                         bp->flags &= ~(BNXT_FLAG_DIM);
87                         goto reset_coalesce;
88                 }
89         }
90
91         hw_coal = &bp->rx_coal;
92         mult = hw_coal->bufs_per_record;
93         hw_coal->coal_ticks = coal->rx_coalesce_usecs;
94         hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
95         hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
96         hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
97
98         hw_coal = &bp->tx_coal;
99         mult = hw_coal->bufs_per_record;
100         hw_coal->coal_ticks = coal->tx_coalesce_usecs;
101         hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
102         hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
103         hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
104
105         if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
106                 u32 stats_ticks = coal->stats_block_coalesce_usecs;
107
108                 /* Allow 0, which means disable. */
109                 if (stats_ticks)
110                         stats_ticks = clamp_t(u32, stats_ticks,
111                                               BNXT_MIN_STATS_COAL_TICKS,
112                                               BNXT_MAX_STATS_COAL_TICKS);
113                 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
114                 bp->stats_coal_ticks = stats_ticks;
115                 update_stats = true;
116         }
117
118 reset_coalesce:
119         if (netif_running(dev)) {
120                 if (update_stats) {
121                         rc = bnxt_close_nic(bp, true, false);
122                         if (!rc)
123                                 rc = bnxt_open_nic(bp, true, false);
124                 } else {
125                         rc = bnxt_hwrm_set_coal(bp);
126                 }
127         }
128
129         return rc;
130 }
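/* Illustrative usage only (eth0 is a placeholder; option names are those of
 * the standard ethtool utility):
 *
 *   ethtool -C eth0 adaptive-rx on
 *   ethtool -C eth0 rx-usecs 50 rx-frames 64
 *   ethtool -C eth0 stats-block-usecs 1000000
 *
 * Only a change to the statistics block interval forces a full
 * bnxt_close_nic()/bnxt_open_nic() cycle; plain coalescing updates go
 * through bnxt_hwrm_set_coal() while the interface stays up.
 */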
131
132 #define BNXT_NUM_STATS  21
133
134 #define BNXT_RX_STATS_ENTRY(counter)    \
135         { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
136
137 #define BNXT_TX_STATS_ENTRY(counter)    \
138         { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
139
140 #define BNXT_RX_STATS_EXT_ENTRY(counter)        \
141         { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
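/* A minimal sketch of what one table entry below expands to (assuming the
 * *_OFFSET() helpers reduce to an offset into the firmware statistics
 * structure, see bnxt.h):
 *
 *   BNXT_RX_STATS_ENTRY(rx_64b_frames)
 *     => { BNXT_RX_STATS_OFFSET(rx_64b_frames), "rx_64b_frames" }
 *
 * i.e. each entry pairs a 64-bit word offset into the DMA-ed stats block
 * with the counter name reported by "ethtool -S".
 */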
142
143 static const struct {
144         long offset;
145         char string[ETH_GSTRING_LEN];
146 } bnxt_port_stats_arr[] = {
147         BNXT_RX_STATS_ENTRY(rx_64b_frames),
148         BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
149         BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
150         BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
151         BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
152         BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
153         BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
154         BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
155         BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
156         BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
157         BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
158         BNXT_RX_STATS_ENTRY(rx_total_frames),
159         BNXT_RX_STATS_ENTRY(rx_ucast_frames),
160         BNXT_RX_STATS_ENTRY(rx_mcast_frames),
161         BNXT_RX_STATS_ENTRY(rx_bcast_frames),
162         BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
163         BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
164         BNXT_RX_STATS_ENTRY(rx_pause_frames),
165         BNXT_RX_STATS_ENTRY(rx_pfc_frames),
166         BNXT_RX_STATS_ENTRY(rx_align_err_frames),
167         BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
168         BNXT_RX_STATS_ENTRY(rx_jbr_frames),
169         BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
170         BNXT_RX_STATS_ENTRY(rx_tagged_frames),
171         BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
172         BNXT_RX_STATS_ENTRY(rx_good_frames),
173         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
174         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
175         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
176         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
177         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
178         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
179         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
180         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
181         BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
182         BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
183         BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
184         BNXT_RX_STATS_ENTRY(rx_bytes),
185         BNXT_RX_STATS_ENTRY(rx_runt_bytes),
186         BNXT_RX_STATS_ENTRY(rx_runt_frames),
187         BNXT_RX_STATS_ENTRY(rx_stat_discard),
188         BNXT_RX_STATS_ENTRY(rx_stat_err),
189
190         BNXT_TX_STATS_ENTRY(tx_64b_frames),
191         BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
192         BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
193         BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
194         BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
195         BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
196         BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
197         BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
198         BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
199         BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
200         BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
201         BNXT_TX_STATS_ENTRY(tx_good_frames),
202         BNXT_TX_STATS_ENTRY(tx_total_frames),
203         BNXT_TX_STATS_ENTRY(tx_ucast_frames),
204         BNXT_TX_STATS_ENTRY(tx_mcast_frames),
205         BNXT_TX_STATS_ENTRY(tx_bcast_frames),
206         BNXT_TX_STATS_ENTRY(tx_pause_frames),
207         BNXT_TX_STATS_ENTRY(tx_pfc_frames),
208         BNXT_TX_STATS_ENTRY(tx_jabber_frames),
209         BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
210         BNXT_TX_STATS_ENTRY(tx_err),
211         BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
212         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
213         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
214         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
215         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
216         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
217         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
218         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
219         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
220         BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
221         BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
222         BNXT_TX_STATS_ENTRY(tx_total_collisions),
223         BNXT_TX_STATS_ENTRY(tx_bytes),
224         BNXT_TX_STATS_ENTRY(tx_xthol_frames),
225         BNXT_TX_STATS_ENTRY(tx_stat_discard),
226         BNXT_TX_STATS_ENTRY(tx_stat_error),
227 };
228
229 static const struct {
230         long offset;
231         char string[ETH_GSTRING_LEN];
232 } bnxt_port_stats_ext_arr[] = {
233         BNXT_RX_STATS_EXT_ENTRY(link_down_events),
234         BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
235         BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
236         BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
237         BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
238 };
239
240 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
241 #define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
242
243 static int bnxt_get_num_stats(struct bnxt *bp)
244 {
245         int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
246
247         if (bp->flags & BNXT_FLAG_PORT_STATS)
248                 num_stats += BNXT_NUM_PORT_STATS;
249
250         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
251                 num_stats += BNXT_NUM_PORT_STATS_EXT;
252
253         return num_stats;
254 }
255
256 static int bnxt_get_sset_count(struct net_device *dev, int sset)
257 {
258         struct bnxt *bp = netdev_priv(dev);
259
260         switch (sset) {
261         case ETH_SS_STATS:
262                 return bnxt_get_num_stats(bp);
263         case ETH_SS_TEST:
264                 if (!bp->num_tests)
265                         return -EOPNOTSUPP;
266                 return bp->num_tests;
267         default:
268                 return -EOPNOTSUPP;
269         }
270 }
271
272 static void bnxt_get_ethtool_stats(struct net_device *dev,
273                                    struct ethtool_stats *stats, u64 *buf)
274 {
275         u32 i, j = 0;
276         struct bnxt *bp = netdev_priv(dev);
277         u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
278
279         if (!bp->bnapi)
280                 return;
281
282         for (i = 0; i < bp->cp_nr_rings; i++) {
283                 struct bnxt_napi *bnapi = bp->bnapi[i];
284                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
285                 __le64 *hw_stats = (__le64 *)cpr->hw_stats;
286                 int k;
287
288                 for (k = 0; k < stat_fields; j++, k++)
289                         buf[j] = le64_to_cpu(hw_stats[k]);
290                 buf[j++] = cpr->rx_l4_csum_errors;
291         }
292         if (bp->flags & BNXT_FLAG_PORT_STATS) {
293                 __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
294
295                 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
296                         buf[j] = le64_to_cpu(*(port_stats +
297                                                bnxt_port_stats_arr[i].offset));
298                 }
299         }
300         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
301                 __le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
302
303                 for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
304                         buf[j] = le64_to_cpu(*(port_stats_ext +
305                                             bnxt_port_stats_ext_arr[i].offset));
306                 }
307         }
308 }
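/* The buffer filled above is laid out as: per-ring hardware counters
 * (sizeof(struct ctx_hw_stats) / 8 words each) followed by the software
 * rx_l4_csum_errors count, then the optional port and extended port
 * statistics.  bnxt_get_strings() below must emit names in exactly the
 * same order, and bnxt_get_num_stats() must account for every entry.
 */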
309
310 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
311 {
312         struct bnxt *bp = netdev_priv(dev);
313         u32 i;
314
315         switch (stringset) {
316         /* The number of strings must match BNXT_NUM_STATS defined above. */
317         case ETH_SS_STATS:
318                 for (i = 0; i < bp->cp_nr_rings; i++) {
319                         sprintf(buf, "[%d]: rx_ucast_packets", i);
320                         buf += ETH_GSTRING_LEN;
321                         sprintf(buf, "[%d]: rx_mcast_packets", i);
322                         buf += ETH_GSTRING_LEN;
323                         sprintf(buf, "[%d]: rx_bcast_packets", i);
324                         buf += ETH_GSTRING_LEN;
325                         sprintf(buf, "[%d]: rx_discards", i);
326                         buf += ETH_GSTRING_LEN;
327                         sprintf(buf, "[%d]: rx_drops", i);
328                         buf += ETH_GSTRING_LEN;
329                         sprintf(buf, "[%d]: rx_ucast_bytes", i);
330                         buf += ETH_GSTRING_LEN;
331                         sprintf(buf, "[%d]: rx_mcast_bytes", i);
332                         buf += ETH_GSTRING_LEN;
333                         sprintf(buf, "[%d]: rx_bcast_bytes", i);
334                         buf += ETH_GSTRING_LEN;
335                         sprintf(buf, "[%d]: tx_ucast_packets", i);
336                         buf += ETH_GSTRING_LEN;
337                         sprintf(buf, "[%d]: tx_mcast_packets", i);
338                         buf += ETH_GSTRING_LEN;
339                         sprintf(buf, "[%d]: tx_bcast_packets", i);
340                         buf += ETH_GSTRING_LEN;
341                         sprintf(buf, "[%d]: tx_discards", i);
342                         buf += ETH_GSTRING_LEN;
343                         sprintf(buf, "[%d]: tx_drops", i);
344                         buf += ETH_GSTRING_LEN;
345                         sprintf(buf, "[%d]: tx_ucast_bytes", i);
346                         buf += ETH_GSTRING_LEN;
347                         sprintf(buf, "[%d]: tx_mcast_bytes", i);
348                         buf += ETH_GSTRING_LEN;
349                         sprintf(buf, "[%d]: tx_bcast_bytes", i);
350                         buf += ETH_GSTRING_LEN;
351                         sprintf(buf, "[%d]: tpa_packets", i);
352                         buf += ETH_GSTRING_LEN;
353                         sprintf(buf, "[%d]: tpa_bytes", i);
354                         buf += ETH_GSTRING_LEN;
355                         sprintf(buf, "[%d]: tpa_events", i);
356                         buf += ETH_GSTRING_LEN;
357                         sprintf(buf, "[%d]: tpa_aborts", i);
358                         buf += ETH_GSTRING_LEN;
359                         sprintf(buf, "[%d]: rx_l4_csum_errors", i);
360                         buf += ETH_GSTRING_LEN;
361                 }
362                 if (bp->flags & BNXT_FLAG_PORT_STATS) {
363                         for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
364                                 strcpy(buf, bnxt_port_stats_arr[i].string);
365                                 buf += ETH_GSTRING_LEN;
366                         }
367                 }
368                 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
369                         for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
370                                 strcpy(buf, bnxt_port_stats_ext_arr[i].string);
371                                 buf += ETH_GSTRING_LEN;
372                         }
373                 }
374                 break;
375         case ETH_SS_TEST:
376                 if (bp->num_tests)
377                         memcpy(buf, bp->test_info->string,
378                                bp->num_tests * ETH_GSTRING_LEN);
379                 break;
380         default:
381                 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
382                            stringset);
383                 break;
384         }
385 }
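/* Illustrative (not verbatim) "ethtool -S" output built from the strings
 * above, assuming a hypothetical eth0 and showing ring 0 only:
 *
 *   [0]: rx_ucast_packets: 123456
 *   [0]: rx_l4_csum_errors: 0
 *   rx_64b_frames: 42
 *
 * The per-ring block repeats for every completion ring, then the port
 * statistics follow without a ring prefix.
 */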
386
387 static void bnxt_get_ringparam(struct net_device *dev,
388                                struct ethtool_ringparam *ering)
389 {
390         struct bnxt *bp = netdev_priv(dev);
391
392         ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
393         ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
394         ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
395
396         ering->rx_pending = bp->rx_ring_size;
397         ering->rx_jumbo_pending = bp->rx_agg_ring_size;
398         ering->tx_pending = bp->tx_ring_size;
399 }
400
401 static int bnxt_set_ringparam(struct net_device *dev,
402                               struct ethtool_ringparam *ering)
403 {
404         struct bnxt *bp = netdev_priv(dev);
405
406         if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
407             (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
408             (ering->tx_pending <= MAX_SKB_FRAGS))
409                 return -EINVAL;
410
411         if (netif_running(dev))
412                 bnxt_close_nic(bp, false, false);
413
414         bp->rx_ring_size = ering->rx_pending;
415         bp->tx_ring_size = ering->tx_pending;
416         bnxt_set_ring_params(bp);
417
418         if (netif_running(dev))
419                 return bnxt_open_nic(bp, false, false);
420
421         return 0;
422 }
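/* The tx_pending <= MAX_SKB_FRAGS check above exists because the TX ring
 * must be large enough to hold at least one maximally fragmented skb.
 * Illustrative usage (eth0 is a placeholder):
 *
 *   ethtool -G eth0 rx 2048 tx 2048
 *
 * If the interface is up, the rings are re-sized by bouncing it through
 * bnxt_close_nic()/bnxt_open_nic().
 */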
423
424 static void bnxt_get_channels(struct net_device *dev,
425                               struct ethtool_channels *channel)
426 {
427         struct bnxt *bp = netdev_priv(dev);
428         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
429         int max_rx_rings, max_tx_rings, tcs;
430         int max_tx_sch_inputs;
431
432         /* Get the most up-to-date max_tx_sch_inputs. */
433         if (bp->flags & BNXT_FLAG_NEW_RM)
434                 bnxt_hwrm_func_resc_qcaps(bp, false);
435         max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
436
437         bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
438         if (max_tx_sch_inputs)
439                 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
440         channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
441
442         if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
443                 max_rx_rings = 0;
444                 max_tx_rings = 0;
445         }
446         if (max_tx_sch_inputs)
447                 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
448
449         tcs = netdev_get_num_tc(dev);
450         if (tcs > 1)
451                 max_tx_rings /= tcs;
452
453         channel->max_rx = max_rx_rings;
454         channel->max_tx = max_tx_rings;
455         channel->max_other = 0;
456         if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
457                 channel->combined_count = bp->rx_nr_rings;
458                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
459                         channel->combined_count--;
460         } else {
461                 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
462                         channel->rx_count = bp->rx_nr_rings;
463                         channel->tx_count = bp->tx_nr_rings_per_tc;
464                 }
465         }
466 }
467
468 static int bnxt_set_channels(struct net_device *dev,
469                              struct ethtool_channels *channel)
470 {
471         struct bnxt *bp = netdev_priv(dev);
472         int req_tx_rings, req_rx_rings, tcs;
473         bool sh = false;
474         int tx_xdp = 0;
475         int rc = 0;
476
477         if (channel->other_count)
478                 return -EINVAL;
479
480         if (!channel->combined_count &&
481             (!channel->rx_count || !channel->tx_count))
482                 return -EINVAL;
483
484         if (channel->combined_count &&
485             (channel->rx_count || channel->tx_count))
486                 return -EINVAL;
487
488         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
489                                             channel->tx_count))
490                 return -EINVAL;
491
492         if (channel->combined_count)
493                 sh = true;
494
495         tcs = netdev_get_num_tc(dev);
496
497         req_tx_rings = sh ? channel->combined_count : channel->tx_count;
498         req_rx_rings = sh ? channel->combined_count : channel->rx_count;
499         if (bp->tx_nr_rings_xdp) {
500                 if (!sh) {
501                         netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
502                         return -EINVAL;
503                 }
504                 tx_xdp = req_rx_rings;
505         }
506         rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
507         if (rc) {
508                 netdev_warn(dev, "Unable to allocate the requested rings\n");
509                 return rc;
510         }
511
512         if (netif_running(dev)) {
513                 if (BNXT_PF(bp)) {
514                         /* TODO CHIMP_FW: Send message to all VFs
515                          * before PF unload
516                          */
517                 }
518                 rc = bnxt_close_nic(bp, true, false);
519                 if (rc) {
520                         netdev_err(bp->dev, "Set channel failure rc: %x\n",
521                                    rc);
522                         return rc;
523                 }
524         }
525
526         if (sh) {
527                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
528                 bp->rx_nr_rings = channel->combined_count;
529                 bp->tx_nr_rings_per_tc = channel->combined_count;
530         } else {
531                 bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
532                 bp->rx_nr_rings = channel->rx_count;
533                 bp->tx_nr_rings_per_tc = channel->tx_count;
534         }
535         bp->tx_nr_rings_xdp = tx_xdp;
536         bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
537         if (tcs > 1)
538                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
539
540         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
541                                bp->tx_nr_rings + bp->rx_nr_rings;
542
543         bp->num_stat_ctxs = bp->cp_nr_rings;
544
545         /* After changing number of rx channels, update NTUPLE feature. */
546         netdev_update_features(dev);
547         if (netif_running(dev)) {
548                 rc = bnxt_open_nic(bp, true, false);
549                 if ((!rc) && BNXT_PF(bp)) {
550                         /* TODO CHIMP_FW: Send message to all VFs
551                          * to re-enable
552                          */
553                 }
554         }
555
556         return rc;
557 }
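/* Illustrative usage only (eth0 is a placeholder):
 *
 *   ethtool -L eth0 combined 8       - shared RX/TX completion rings
 *   ethtool -L eth0 rx 4 tx 4        - separate RX and TX rings
 *
 * Mixing "combined" with "rx"/"tx" counts is rejected above, and when an
 * XDP program is attached only the combined mode is accepted because one
 * extra XDP TX ring must be reserved per RX ring (tx_xdp = req_rx_rings).
 */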
558
559 #ifdef CONFIG_RFS_ACCEL
560 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
561                             u32 *rule_locs)
562 {
563         int i, j = 0;
564
565         cmd->data = bp->ntp_fltr_count;
566         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
567                 struct hlist_head *head;
568                 struct bnxt_ntuple_filter *fltr;
569
570                 head = &bp->ntp_fltr_hash_tbl[i];
571                 rcu_read_lock();
572                 hlist_for_each_entry_rcu(fltr, head, hash) {
573                         if (j == cmd->rule_cnt)
574                                 break;
575                         rule_locs[j++] = fltr->sw_id;
576                 }
577                 rcu_read_unlock();
578                 if (j == cmd->rule_cnt)
579                         break;
580         }
581         cmd->rule_cnt = j;
582         return 0;
583 }
584
585 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
586 {
587         struct ethtool_rx_flow_spec *fs =
588                 (struct ethtool_rx_flow_spec *)&cmd->fs;
589         struct bnxt_ntuple_filter *fltr;
590         struct flow_keys *fkeys;
591         int i, rc = -EINVAL;
592
593         if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
594                 return rc;
595
596         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
597                 struct hlist_head *head;
598
599                 head = &bp->ntp_fltr_hash_tbl[i];
600                 rcu_read_lock();
601                 hlist_for_each_entry_rcu(fltr, head, hash) {
602                         if (fltr->sw_id == fs->location)
603                                 goto fltr_found;
604                 }
605                 rcu_read_unlock();
606         }
607         return rc;
608
609 fltr_found:
610         fkeys = &fltr->fkeys;
611         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
612                 if (fkeys->basic.ip_proto == IPPROTO_TCP)
613                         fs->flow_type = TCP_V4_FLOW;
614                 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
615                         fs->flow_type = UDP_V4_FLOW;
616                 else
617                         goto fltr_err;
618
619                 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
620                 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
621
622                 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
623                 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
624
625                 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
626                 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
627
628                 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
629                 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
630         } else {
631                 int i;
632
633                 if (fkeys->basic.ip_proto == IPPROTO_TCP)
634                         fs->flow_type = TCP_V6_FLOW;
635                 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
636                         fs->flow_type = UDP_V6_FLOW;
637                 else
638                         goto fltr_err;
639
640                 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
641                         fkeys->addrs.v6addrs.src;
642                 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
643                         fkeys->addrs.v6addrs.dst;
644                 for (i = 0; i < 4; i++) {
645                         fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
646                         fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
647                 }
648                 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
649                 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
650
651                 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
652                 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
653         }
654
655         fs->ring_cookie = fltr->rxq;
656         rc = 0;
657
658 fltr_err:
659         rcu_read_unlock();
660
661         return rc;
662 }
663 #endif
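/* The two helpers above back "ethtool -n <dev>" / "ethtool -n <dev> rule N"
 * queries for the ntuple filters installed by accelerated RFS; they only
 * report existing filters.  Insertion and aging are handled by the aRFS
 * path in bnxt.c (the ndo_rx_flow_steer callback), not by ethtool.
 */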
664
665 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
666 {
667         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
668                 return RXH_IP_SRC | RXH_IP_DST;
669         return 0;
670 }
671
672 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
673 {
674         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
675                 return RXH_IP_SRC | RXH_IP_DST;
676         return 0;
677 }
678
679 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
680 {
681         cmd->data = 0;
682         switch (cmd->flow_type) {
683         case TCP_V4_FLOW:
684                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
685                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
686                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
687                 cmd->data |= get_ethtool_ipv4_rss(bp);
688                 break;
689         case UDP_V4_FLOW:
690                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
691                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
692                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
693                 /* fall through */
694         case SCTP_V4_FLOW:
695         case AH_ESP_V4_FLOW:
696         case AH_V4_FLOW:
697         case ESP_V4_FLOW:
698         case IPV4_FLOW:
699                 cmd->data |= get_ethtool_ipv4_rss(bp);
700                 break;
701
702         case TCP_V6_FLOW:
703                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
704                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
705                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
706                 cmd->data |= get_ethtool_ipv6_rss(bp);
707                 break;
708         case UDP_V6_FLOW:
709                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
710                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
711                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
712                 /* fall through */
713         case SCTP_V6_FLOW:
714         case AH_ESP_V6_FLOW:
715         case AH_V6_FLOW:
716         case ESP_V6_FLOW:
717         case IPV6_FLOW:
718                 cmd->data |= get_ethtool_ipv6_rss(bp);
719                 break;
720         }
721         return 0;
722 }
723
724 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
725 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
726
727 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
728 {
729         u32 rss_hash_cfg = bp->rss_hash_cfg;
730         int tuple, rc = 0;
731
732         if (cmd->data == RXH_4TUPLE)
733                 tuple = 4;
734         else if (cmd->data == RXH_2TUPLE)
735                 tuple = 2;
736         else if (!cmd->data)
737                 tuple = 0;
738         else
739                 return -EINVAL;
740
741         if (cmd->flow_type == TCP_V4_FLOW) {
742                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
743                 if (tuple == 4)
744                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
745         } else if (cmd->flow_type == UDP_V4_FLOW) {
746                 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
747                         return -EINVAL;
748                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
749                 if (tuple == 4)
750                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
751         } else if (cmd->flow_type == TCP_V6_FLOW) {
752                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
753                 if (tuple == 4)
754                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
755         } else if (cmd->flow_type == UDP_V6_FLOW) {
756                 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
757                         return -EINVAL;
758                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
759                 if (tuple == 4)
760                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
761         } else if (tuple == 4) {
762                 return -EINVAL;
763         }
764
765         switch (cmd->flow_type) {
766         case TCP_V4_FLOW:
767         case UDP_V4_FLOW:
768         case SCTP_V4_FLOW:
769         case AH_ESP_V4_FLOW:
770         case AH_V4_FLOW:
771         case ESP_V4_FLOW:
772         case IPV4_FLOW:
773                 if (tuple == 2)
774                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
775                 else if (!tuple)
776                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
777                 break;
778
779         case TCP_V6_FLOW:
780         case UDP_V6_FLOW:
781         case SCTP_V6_FLOW:
782         case AH_ESP_V6_FLOW:
783         case AH_V6_FLOW:
784         case ESP_V6_FLOW:
785         case IPV6_FLOW:
786                 if (tuple == 2)
787                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
788                 else if (!tuple)
789                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
790                 break;
791         }
792
793         if (bp->rss_hash_cfg == rss_hash_cfg)
794                 return 0;
795
796         bp->rss_hash_cfg = rss_hash_cfg;
797         if (netif_running(bp->dev)) {
798                 bnxt_close_nic(bp, false, false);
799                 rc = bnxt_open_nic(bp, false, false);
800         }
801         return rc;
802 }
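/* Hedged example of driving the RSS hash configuration above (eth0 is a
 * placeholder):
 *
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn   - 4-tuple hashing, TCP/IPv4
 *   ethtool -N eth0 rx-flow-hash udp4 sd     - 2-tuple (addresses only)
 *
 * 4-tuple hashing for UDP is only honored when the device advertises
 * BNXT_FLAG_UDP_RSS_CAP, and any real change bounces the interface so the
 * new hash configuration is reprogrammed into the VNICs.
 */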
803
804 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
805                           u32 *rule_locs)
806 {
807         struct bnxt *bp = netdev_priv(dev);
808         int rc = 0;
809
810         switch (cmd->cmd) {
811 #ifdef CONFIG_RFS_ACCEL
812         case ETHTOOL_GRXRINGS:
813                 cmd->data = bp->rx_nr_rings;
814                 break;
815
816         case ETHTOOL_GRXCLSRLCNT:
817                 cmd->rule_cnt = bp->ntp_fltr_count;
818                 cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
819                 break;
820
821         case ETHTOOL_GRXCLSRLALL:
822                 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
823                 break;
824
825         case ETHTOOL_GRXCLSRULE:
826                 rc = bnxt_grxclsrule(bp, cmd);
827                 break;
828 #endif
829
830         case ETHTOOL_GRXFH:
831                 rc = bnxt_grxfh(bp, cmd);
832                 break;
833
834         default:
835                 rc = -EOPNOTSUPP;
836                 break;
837         }
838
839         return rc;
840 }
841
842 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
843 {
844         struct bnxt *bp = netdev_priv(dev);
845         int rc;
846
847         switch (cmd->cmd) {
848         case ETHTOOL_SRXFH:
849                 rc = bnxt_srxfh(bp, cmd);
850                 break;
851
852         default:
853                 rc = -EOPNOTSUPP;
854                 break;
855         }
856         return rc;
857 }
858
859 static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
860 {
861         return HW_HASH_INDEX_SIZE;
862 }
863
864 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
865 {
866         return HW_HASH_KEY_SIZE;
867 }
868
869 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
870                          u8 *hfunc)
871 {
872         struct bnxt *bp = netdev_priv(dev);
873         struct bnxt_vnic_info *vnic;
874         int i = 0;
875
876         if (hfunc)
877                 *hfunc = ETH_RSS_HASH_TOP;
878
879         if (!bp->vnic_info)
880                 return 0;
881
882         vnic = &bp->vnic_info[0];
883         if (indir && vnic->rss_table) {
884                 for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
885                         indir[i] = le16_to_cpu(vnic->rss_table[i]);
886         }
887
888         if (key && vnic->rss_hash_key)
889                 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
890
891         return 0;
892 }
893
894 static void bnxt_get_drvinfo(struct net_device *dev,
895                              struct ethtool_drvinfo *info)
896 {
897         struct bnxt *bp = netdev_priv(dev);
898
899         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
900         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
901         strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
902         strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
903         info->n_stats = bnxt_get_num_stats(bp);
904         info->testinfo_len = bp->num_tests;
905         /* TODO CHIMP_FW: eeprom dump details */
906         info->eedump_len = 0;
907         /* TODO CHIMP FW: reg dump details */
908         info->regdump_len = 0;
909 }
910
911 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
912 {
913         struct bnxt *bp = netdev_priv(dev);
914
915         wol->supported = 0;
916         wol->wolopts = 0;
917         memset(&wol->sopass, 0, sizeof(wol->sopass));
918         if (bp->flags & BNXT_FLAG_WOL_CAP) {
919                 wol->supported = WAKE_MAGIC;
920                 if (bp->wol)
921                         wol->wolopts = WAKE_MAGIC;
922         }
923 }
924
925 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
926 {
927         struct bnxt *bp = netdev_priv(dev);
928
929         if (wol->wolopts & ~WAKE_MAGIC)
930                 return -EINVAL;
931
932         if (wol->wolopts & WAKE_MAGIC) {
933                 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
934                         return -EINVAL;
935                 if (!bp->wol) {
936                         if (bnxt_hwrm_alloc_wol_fltr(bp))
937                                 return -EBUSY;
938                         bp->wol = 1;
939                 }
940         } else {
941                 if (bp->wol) {
942                         if (bnxt_hwrm_free_wol_fltr(bp))
943                                 return -EBUSY;
944                         bp->wol = 0;
945                 }
946         }
947         return 0;
948 }
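/* Wake-on-LAN here is limited to magic packets and is implemented by
 * allocating or freeing a firmware WoL filter.  Illustrative usage, with
 * eth0 as a placeholder:
 *
 *   ethtool -s eth0 wol g    - enable magic-packet wake
 *   ethtool -s eth0 wol d    - disable
 */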
949
950 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
951 {
952         u32 speed_mask = 0;
953
954         /* TODO: support 25GB, 40GB, 50GB with different cable type */
955         /* set the advertised speeds */
956         if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
957                 speed_mask |= ADVERTISED_100baseT_Full;
958         if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
959                 speed_mask |= ADVERTISED_1000baseT_Full;
960         if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
961                 speed_mask |= ADVERTISED_2500baseX_Full;
962         if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
963                 speed_mask |= ADVERTISED_10000baseT_Full;
964         if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
965                 speed_mask |= ADVERTISED_40000baseCR4_Full;
966
967         if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
968                 speed_mask |= ADVERTISED_Pause;
969         else if (fw_pause & BNXT_LINK_PAUSE_TX)
970                 speed_mask |= ADVERTISED_Asym_Pause;
971         else if (fw_pause & BNXT_LINK_PAUSE_RX)
972                 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
973
974         return speed_mask;
975 }
976
977 #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
978 {                                                                       \
979         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)                    \
980                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
981                                                      100baseT_Full);    \
982         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)                      \
983                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
984                                                      1000baseT_Full);   \
985         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)                     \
986                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
987                                                      10000baseT_Full);  \
988         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)                     \
989                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
990                                                      25000baseCR_Full); \
991         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)                     \
992                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
993                                                      40000baseCR4_Full);\
994         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)                     \
995                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
996                                                      50000baseCR2_Full);\
997         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)                    \
998                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
999                                                      100000baseCR4_Full);\
1000         if ((fw_pause) & BNXT_LINK_PAUSE_RX) {                          \
1001                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1002                                                      Pause);            \
1003                 if (!((fw_pause) & BNXT_LINK_PAUSE_TX))                 \
1004                         ethtool_link_ksettings_add_link_mode(           \
1005                                         lk_ksettings, name, Asym_Pause);\
1006         } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {                   \
1007                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1008                                                      Asym_Pause);       \
1009         }                                                               \
1010 }
1011
1012 #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)          \
1013 {                                                                       \
1014         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1015                                                   100baseT_Full) ||     \
1016             ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1017                                                   100baseT_Half))       \
1018                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;               \
1019         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1020                                                   1000baseT_Full) ||    \
1021             ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1022                                                   1000baseT_Half))      \
1023                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;                 \
1024         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1025                                                   10000baseT_Full))     \
1026                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;                \
1027         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1028                                                   25000baseCR_Full))    \
1029                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;                \
1030         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1031                                                   40000baseCR4_Full))   \
1032                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;                \
1033         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1034                                                   50000baseCR2_Full))   \
1035                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;                \
1036         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1037                                                   100000baseCR4_Full))  \
1038                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;               \
1039 }
1040
1041 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1042                                 struct ethtool_link_ksettings *lk_ksettings)
1043 {
1044         u16 fw_speeds = link_info->advertising;
1045         u8 fw_pause = 0;
1046
1047         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1048                 fw_pause = link_info->auto_pause_setting;
1049
1050         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1051 }
1052
1053 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1054                                 struct ethtool_link_ksettings *lk_ksettings)
1055 {
1056         u16 fw_speeds = link_info->lp_auto_link_speeds;
1057         u8 fw_pause = 0;
1058
1059         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1060                 fw_pause = link_info->lp_pause;
1061
1062         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1063                                 lp_advertising);
1064 }
1065
1066 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1067                                 struct ethtool_link_ksettings *lk_ksettings)
1068 {
1069         u16 fw_speeds = link_info->support_speeds;
1070
1071         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1072
1073         ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
1074         ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1075                                              Asym_Pause);
1076
1077         if (link_info->support_auto_speeds)
1078                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1079                                                      Autoneg);
1080 }
1081
1082 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1083 {
1084         switch (fw_link_speed) {
1085         case BNXT_LINK_SPEED_100MB:
1086                 return SPEED_100;
1087         case BNXT_LINK_SPEED_1GB:
1088                 return SPEED_1000;
1089         case BNXT_LINK_SPEED_2_5GB:
1090                 return SPEED_2500;
1091         case BNXT_LINK_SPEED_10GB:
1092                 return SPEED_10000;
1093         case BNXT_LINK_SPEED_20GB:
1094                 return SPEED_20000;
1095         case BNXT_LINK_SPEED_25GB:
1096                 return SPEED_25000;
1097         case BNXT_LINK_SPEED_40GB:
1098                 return SPEED_40000;
1099         case BNXT_LINK_SPEED_50GB:
1100                 return SPEED_50000;
1101         case BNXT_LINK_SPEED_100GB:
1102                 return SPEED_100000;
1103         default:
1104                 return SPEED_UNKNOWN;
1105         }
1106 }
1107
1108 static int bnxt_get_link_ksettings(struct net_device *dev,
1109                                    struct ethtool_link_ksettings *lk_ksettings)
1110 {
1111         struct bnxt *bp = netdev_priv(dev);
1112         struct bnxt_link_info *link_info = &bp->link_info;
1113         struct ethtool_link_settings *base = &lk_ksettings->base;
1114         u32 ethtool_speed;
1115
1116         ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1117         mutex_lock(&bp->link_lock);
1118         bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1119
1120         ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
1121         if (link_info->autoneg) {
1122                 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
1123                 ethtool_link_ksettings_add_link_mode(lk_ksettings,
1124                                                      advertising, Autoneg);
1125                 base->autoneg = AUTONEG_ENABLE;
1126                 if (link_info->phy_link_status == BNXT_LINK_LINK)
1127                         bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
1128                 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
1129                 if (!netif_carrier_ok(dev))
1130                         base->duplex = DUPLEX_UNKNOWN;
1131                 else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1132                         base->duplex = DUPLEX_FULL;
1133                 else
1134                         base->duplex = DUPLEX_HALF;
1135         } else {
1136                 base->autoneg = AUTONEG_DISABLE;
1137                 ethtool_speed =
1138                         bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
1139                 base->duplex = DUPLEX_HALF;
1140                 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
1141                         base->duplex = DUPLEX_FULL;
1142         }
1143         base->speed = ethtool_speed;
1144
1145         base->port = PORT_NONE;
1146         if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1147                 base->port = PORT_TP;
1148                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1149                                                      TP);
1150                 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1151                                                      TP);
1152         } else {
1153                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1154                                                      FIBRE);
1155                 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1156                                                      FIBRE);
1157
1158                 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
1159                         base->port = PORT_DA;
1160                 else if (link_info->media_type ==
1161                          PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
1162                         base->port = PORT_FIBRE;
1163         }
1164         base->phy_address = link_info->phy_addr;
1165         mutex_unlock(&bp->link_lock);
1166
1167         return 0;
1168 }
1169
1170 static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
1171 {
1172         struct bnxt *bp = netdev_priv(dev);
1173         struct bnxt_link_info *link_info = &bp->link_info;
1174         u16 support_spds = link_info->support_speeds;
1175         u32 fw_speed = 0;
1176
1177         switch (ethtool_speed) {
1178         case SPEED_100:
1179                 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1180                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
1181                 break;
1182         case SPEED_1000:
1183                 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1184                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
1185                 break;
1186         case SPEED_2500:
1187                 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1188                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
1189                 break;
1190         case SPEED_10000:
1191                 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1192                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
1193                 break;
1194         case SPEED_20000:
1195                 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1196                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
1197                 break;
1198         case SPEED_25000:
1199                 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1200                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
1201                 break;
1202         case SPEED_40000:
1203                 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1204                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
1205                 break;
1206         case SPEED_50000:
1207                 if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
1208                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
1209                 break;
1210         case SPEED_100000:
1211                 if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
1212                         fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
1213                 break;
1214         default:
1215                 netdev_err(dev, "unsupported speed!\n");
1216                 break;
1217         }
1218         return fw_speed;
1219 }
1220
1221 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1222 {
1223         u16 fw_speed_mask = 0;
1224
1225         /* only support autoneg at speeds 100, 1000, 10000, and 40000 */
1226         if (advertising & (ADVERTISED_100baseT_Full |
1227                            ADVERTISED_100baseT_Half)) {
1228                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1229         }
1230         if (advertising & (ADVERTISED_1000baseT_Full |
1231                            ADVERTISED_1000baseT_Half)) {
1232                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1233         }
1234         if (advertising & ADVERTISED_10000baseT_Full)
1235                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1236
1237         if (advertising & ADVERTISED_40000baseCR4_Full)
1238                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1239
1240         return fw_speed_mask;
1241 }
1242
1243 static int bnxt_set_link_ksettings(struct net_device *dev,
1244                            const struct ethtool_link_ksettings *lk_ksettings)
1245 {
1246         struct bnxt *bp = netdev_priv(dev);
1247         struct bnxt_link_info *link_info = &bp->link_info;
1248         const struct ethtool_link_settings *base = &lk_ksettings->base;
1249         bool set_pause = false;
1250         u16 fw_advertising = 0;
1251         u32 speed;
1252         int rc = 0;
1253
1254         if (!BNXT_SINGLE_PF(bp))
1255                 return -EOPNOTSUPP;
1256
1257         mutex_lock(&bp->link_lock);
1258         if (base->autoneg == AUTONEG_ENABLE) {
1259                 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1260                                         advertising);
1261                 link_info->autoneg |= BNXT_AUTONEG_SPEED;
1262                 if (!fw_advertising)
1263                         link_info->advertising = link_info->support_auto_speeds;
1264                 else
1265                         link_info->advertising = fw_advertising;
1266                 /* any change to autoneg will cause link change, therefore the
1267                  * driver should put back the original pause setting in autoneg
1268                  */
1269                 set_pause = true;
1270         } else {
1271                 u16 fw_speed;
1272                 u8 phy_type = link_info->phy_type;
1273
1274                 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
1275                     phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
1276                     link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1277                         netdev_err(dev, "10GBase-T devices must autoneg\n");
1278                         rc = -EINVAL;
1279                         goto set_setting_exit;
1280                 }
1281                 if (base->duplex == DUPLEX_HALF) {
1282                         netdev_err(dev, "HALF DUPLEX is not supported!\n");
1283                         rc = -EINVAL;
1284                         goto set_setting_exit;
1285                 }
1286                 speed = base->speed;
1287                 fw_speed = bnxt_get_fw_speed(dev, speed);
1288                 if (!fw_speed) {
1289                         rc = -EINVAL;
1290                         goto set_setting_exit;
1291                 }
1292                 link_info->req_link_speed = fw_speed;
1293                 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
1294                 link_info->autoneg = 0;
1295                 link_info->advertising = 0;
1296         }
1297
1298         if (netif_running(dev))
1299                 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1300
1301 set_setting_exit:
1302         mutex_unlock(&bp->link_lock);
1303         return rc;
1304 }
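/* Illustrative link configuration (eth0 is a placeholder):
 *
 *   ethtool -s eth0 autoneg on
 *   ethtool -s eth0 speed 25000 duplex full autoneg off
 *
 * Forced speeds are rejected on BASE-T/TP media and for anything other
 * than full duplex, matching the checks above, and only a single-function
 * PF may change link settings at all.
 */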
1305
1306 static void bnxt_get_pauseparam(struct net_device *dev,
1307                                 struct ethtool_pauseparam *epause)
1308 {
1309         struct bnxt *bp = netdev_priv(dev);
1310         struct bnxt_link_info *link_info = &bp->link_info;
1311
1312         if (BNXT_VF(bp))
1313                 return;
1314         epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
1315         epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
1316         epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
1317 }
1318
1319 static int bnxt_set_pauseparam(struct net_device *dev,
1320                                struct ethtool_pauseparam *epause)
1321 {
1322         int rc = 0;
1323         struct bnxt *bp = netdev_priv(dev);
1324         struct bnxt_link_info *link_info = &bp->link_info;
1325
1326         if (!BNXT_SINGLE_PF(bp))
1327                 return -EOPNOTSUPP;
1328
1329         if (epause->autoneg) {
1330                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
1331                         return -EINVAL;
1332
1333                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
1334                 if (bp->hwrm_spec_code >= 0x10201)
1335                         link_info->req_flow_ctrl =
1336                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
1337         } else {
1338                 /* When transitioning from autoneg pause to forced pause,
1339                  * force a link change.
1340                  */
1341                 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1342                         link_info->force_link_chng = true;
1343                 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
1344                 link_info->req_flow_ctrl = 0;
1345         }
1346         if (epause->rx_pause)
1347                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
1348
1349         if (epause->tx_pause)
1350                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
1351
1352         if (netif_running(dev))
1353                 rc = bnxt_hwrm_set_pause(bp);
1354         return rc;
1355 }
1356
1357 static u32 bnxt_get_link(struct net_device *dev)
1358 {
1359         struct bnxt *bp = netdev_priv(dev);
1360
1361         /* TODO: handle MF, VF, driver close case */
1362         return bp->link_info.link_up;
1363 }
1364
1365 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1366                                 u16 ext, u16 *index, u32 *item_length,
1367                                 u32 *data_length);
1368
1369 static int bnxt_flash_nvram(struct net_device *dev,
1370                             u16 dir_type,
1371                             u16 dir_ordinal,
1372                             u16 dir_ext,
1373                             u16 dir_attr,
1374                             const u8 *data,
1375                             size_t data_len)
1376 {
1377         struct bnxt *bp = netdev_priv(dev);
1378         int rc;
1379         struct hwrm_nvm_write_input req = {0};
1380         dma_addr_t dma_handle;
1381         u8 *kmem;
1382
1383         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
1384
1385         req.dir_type = cpu_to_le16(dir_type);
1386         req.dir_ordinal = cpu_to_le16(dir_ordinal);
1387         req.dir_ext = cpu_to_le16(dir_ext);
1388         req.dir_attr = cpu_to_le16(dir_attr);
1389         req.dir_data_length = cpu_to_le32(data_len);
1390
1391         kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
1392                                   GFP_KERNEL);
1393         if (!kmem) {
1394                 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1395                            (unsigned int)data_len);
1396                 return -ENOMEM;
1397         }
1398         memcpy(kmem, data, data_len);
1399         req.host_src_addr = cpu_to_le64(dma_handle);
1400
1401         rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
1402         dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
1403
1404         return rc;
1405 }
1406
1407 static int bnxt_firmware_reset(struct net_device *dev,
1408                                u16 dir_type)
1409 {
1410         struct bnxt *bp = netdev_priv(dev);
1411         struct hwrm_fw_reset_input req = {0};
1412
1413         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
1414
1415         /* TODO: Address self-reset of APE/KONG/BONO/TANG, or an ungraceful
1416          * reset (e.g. when firmware isn't already running). */
1417         switch (dir_type) {
1418         case BNX_DIR_TYPE_CHIMP_PATCH:
1419         case BNX_DIR_TYPE_BOOTCODE:
1420         case BNX_DIR_TYPE_BOOTCODE_2:
1421                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
1422                 /* Self-reset ChiMP upon next PCIe reset: */
1423                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
1424                 break;
1425         case BNX_DIR_TYPE_APE_FW:
1426         case BNX_DIR_TYPE_APE_PATCH:
1427                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
1428                 /* Self-reset APE upon next PCIe reset: */
1429                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
1430                 break;
1431         case BNX_DIR_TYPE_KONG_FW:
1432         case BNX_DIR_TYPE_KONG_PATCH:
1433                 req.embedded_proc_type =
1434                         FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
1435                 break;
1436         case BNX_DIR_TYPE_BONO_FW:
1437         case BNX_DIR_TYPE_BONO_PATCH:
1438                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
1439                 break;
1440         case BNXT_FW_RESET_CHIP:
1441                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
1442                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
1443                 break;
1444         case BNXT_FW_RESET_AP:
1445                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
1446                 break;
1447         default:
1448                 return -EINVAL;
1449         }
1450
1451         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1452 }
1453
1454 static int bnxt_flash_firmware(struct net_device *dev,
1455                                u16 dir_type,
1456                                const u8 *fw_data,
1457                                size_t fw_size)
1458 {
1459         int     rc = 0;
1460         u16     code_type;
1461         u32     stored_crc;
1462         u32     calculated_crc;
1463         struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
1464
1465         switch (dir_type) {
1466         case BNX_DIR_TYPE_BOOTCODE:
1467         case BNX_DIR_TYPE_BOOTCODE_2:
1468                 code_type = CODE_BOOT;
1469                 break;
1470         case BNX_DIR_TYPE_CHIMP_PATCH:
1471                 code_type = CODE_CHIMP_PATCH;
1472                 break;
1473         case BNX_DIR_TYPE_APE_FW:
1474                 code_type = CODE_MCTP_PASSTHRU;
1475                 break;
1476         case BNX_DIR_TYPE_APE_PATCH:
1477                 code_type = CODE_APE_PATCH;
1478                 break;
1479         case BNX_DIR_TYPE_KONG_FW:
1480                 code_type = CODE_KONG_FW;
1481                 break;
1482         case BNX_DIR_TYPE_KONG_PATCH:
1483                 code_type = CODE_KONG_PATCH;
1484                 break;
1485         case BNX_DIR_TYPE_BONO_FW:
1486                 code_type = CODE_BONO_FW;
1487                 break;
1488         case BNX_DIR_TYPE_BONO_PATCH:
1489                 code_type = CODE_BONO_PATCH;
1490                 break;
1491         default:
1492                 netdev_err(dev, "Unsupported directory entry type: %u\n",
1493                            dir_type);
1494                 return -EINVAL;
1495         }
1496         if (fw_size < sizeof(struct bnxt_fw_header)) {
1497                 netdev_err(dev, "Invalid firmware file size: %u\n",
1498                            (unsigned int)fw_size);
1499                 return -EINVAL;
1500         }
1501         if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
1502                 netdev_err(dev, "Invalid firmware signature: %08X\n",
1503                            le32_to_cpu(header->signature));
1504                 return -EINVAL;
1505         }
1506         if (header->code_type != code_type) {
1507                 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
1508                            code_type, header->code_type);
1509                 return -EINVAL;
1510         }
1511         if (header->device != DEVICE_CUMULUS_FAMILY) {
1512                 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
1513                            DEVICE_CUMULUS_FAMILY, header->device);
1514                 return -EINVAL;
1515         }
1516         /* Confirm the CRC32 checksum of the file: */
1517         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
1518                                              sizeof(stored_crc)));
1519         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
1520         if (calculated_crc != stored_crc) {
1521                 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
1522                            (unsigned long)stored_crc,
1523                            (unsigned long)calculated_crc);
1524                 return -EINVAL;
1525         }
1526         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1527                               0, 0, fw_data, fw_size);
1528         if (rc == 0)    /* Firmware update successful */
1529                 rc = bnxt_firmware_reset(dev, dir_type);
1530
1531         return rc;
1532 }
1533
1534 static int bnxt_flash_microcode(struct net_device *dev,
1535                                 u16 dir_type,
1536                                 const u8 *fw_data,
1537                                 size_t fw_size)
1538 {
1539         struct bnxt_ucode_trailer *trailer;
1540         u32 calculated_crc;
1541         u32 stored_crc;
1542         int rc = 0;
1543
1544         if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
1545                 netdev_err(dev, "Invalid microcode file size: %u\n",
1546                            (unsigned int)fw_size);
1547                 return -EINVAL;
1548         }
1549         trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
1550                                                 sizeof(*trailer)));
1551         if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
1552                 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
1553                            le32_to_cpu(trailer->sig));
1554                 return -EINVAL;
1555         }
1556         if (le16_to_cpu(trailer->dir_type) != dir_type) {
1557                 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
1558                            dir_type, le16_to_cpu(trailer->dir_type));
1559                 return -EINVAL;
1560         }
1561         if (le16_to_cpu(trailer->trailer_length) <
1562                 sizeof(struct bnxt_ucode_trailer)) {
1563                 netdev_err(dev, "Invalid microcode trailer length: %d\n",
1564                            le16_to_cpu(trailer->trailer_length));
1565                 return -EINVAL;
1566         }
1567
1568         /* Confirm the CRC32 checksum of the file: */
1569         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
1570                                              sizeof(stored_crc)));
1571         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
1572         if (calculated_crc != stored_crc) {
1573                 netdev_err(dev,
1574                            "CRC32 (%08lX) does not match calculated: %08lX\n",
1575                            (unsigned long)stored_crc,
1576                            (unsigned long)calculated_crc);
1577                 return -EINVAL;
1578         }
1579         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1580                               0, 0, fw_data, fw_size);
1581
1582         return rc;
1583 }
1584
1585 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1586 {
1587         switch (dir_type) {
1588         case BNX_DIR_TYPE_CHIMP_PATCH:
1589         case BNX_DIR_TYPE_BOOTCODE:
1590         case BNX_DIR_TYPE_BOOTCODE_2:
1591         case BNX_DIR_TYPE_APE_FW:
1592         case BNX_DIR_TYPE_APE_PATCH:
1593         case BNX_DIR_TYPE_KONG_FW:
1594         case BNX_DIR_TYPE_KONG_PATCH:
1595         case BNX_DIR_TYPE_BONO_FW:
1596         case BNX_DIR_TYPE_BONO_PATCH:
1597                 return true;
1598         }
1599
1600         return false;
1601 }
1602
1603 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
1604 {
1605         switch (dir_type) {
1606         case BNX_DIR_TYPE_AVS:
1607         case BNX_DIR_TYPE_EXP_ROM_MBA:
1608         case BNX_DIR_TYPE_PCIE:
1609         case BNX_DIR_TYPE_TSCF_UCODE:
1610         case BNX_DIR_TYPE_EXT_PHY:
1611         case BNX_DIR_TYPE_CCM:
1612         case BNX_DIR_TYPE_ISCSI_BOOT:
1613         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
1614         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
1615                 return true;
1616         }
1617
1618         return false;
1619 }
1620
1621 static bool bnxt_dir_type_is_executable(u16 dir_type)
1622 {
1623         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
1624                 bnxt_dir_type_is_other_exec_format(dir_type);
1625 }
1626
1627 static int bnxt_flash_firmware_from_file(struct net_device *dev,
1628                                          u16 dir_type,
1629                                          const char *filename)
1630 {
1631         const struct firmware  *fw;
1632         int                     rc;
1633
1634         rc = request_firmware(&fw, filename, &dev->dev);
1635         if (rc != 0) {
1636                 netdev_err(dev, "Error %d requesting firmware file: %s\n",
1637                            rc, filename);
1638                 return rc;
1639         }
1640         if (bnxt_dir_type_is_ape_bin_format(dir_type))
1641                 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
1642         else if (bnxt_dir_type_is_other_exec_format(dir_type))
1643                 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
1644         else
1645                 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1646                                       0, 0, fw->data, fw->size);
1647         release_firmware(fw);
1648         return rc;
1649 }
1650
1651 static int bnxt_flash_package_from_file(struct net_device *dev,
1652                                         char *filename, u32 install_type)
1653 {
1654         struct bnxt *bp = netdev_priv(dev);
1655         struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
1656         struct hwrm_nvm_install_update_input install = {0};
1657         const struct firmware *fw;
1658         u32 item_len;
1659         u16 index;
1660         int rc;
1661
1662         bnxt_hwrm_fw_set_time(bp);
1663
1664         if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
1665                                  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1666                                  &index, &item_len, NULL) != 0) {
1667                 netdev_err(dev, "PKG update area not created in nvram\n");
1668                 return -ENOBUFS;
1669         }
1670
1671         rc = request_firmware(&fw, filename, &dev->dev);
1672         if (rc != 0) {
1673                 netdev_err(dev, "PKG error %d requesting file: %s\n",
1674                            rc, filename);
1675                 return rc;
1676         }
1677
1678         if (fw->size > item_len) {
1679                 netdev_err(dev, "PKG insufficient update area in nvram: %lu",
1680                            (unsigned long)fw->size);
1681                 rc = -EFBIG;
1682         } else {
1683                 dma_addr_t dma_handle;
1684                 u8 *kmem;
1685                 struct hwrm_nvm_modify_input modify = {0};
1686
1687                 bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
1688
1689                 modify.dir_idx = cpu_to_le16(index);
1690                 modify.len = cpu_to_le32(fw->size);
1691
1692                 kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
1693                                           &dma_handle, GFP_KERNEL);
1694                 if (!kmem) {
1695                         netdev_err(dev,
1696                                    "dma_alloc_coherent failure, length = %u\n",
1697                                    (unsigned int)fw->size);
1698                         rc = -ENOMEM;
1699                 } else {
1700                         memcpy(kmem, fw->data, fw->size);
1701                         modify.host_src_addr = cpu_to_le64(dma_handle);
1702
1703                         rc = hwrm_send_message(bp, &modify, sizeof(modify),
1704                                                FLASH_PACKAGE_TIMEOUT);
1705                         dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
1706                                           dma_handle);
1707                 }
1708         }
1709         release_firmware(fw);
1710         if (rc)
1711                 return rc;
1712
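             /* The package has been staged into the NVM update area via
              * HWRM_NVM_MODIFY above; now ask the firmware to install it.
              * If the low 16 bits of install_type are zero, the type was
              * passed in the upper 16 bits (see bnxt_flash_device()).
              */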
1713         if ((install_type & 0xffff) == 0)
1714                 install_type >>= 16;
1715         bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
1716         install.install_type = cpu_to_le32(install_type);
1717
1718         mutex_lock(&bp->hwrm_cmd_lock);
1719         rc = _hwrm_send_message(bp, &install, sizeof(install),
1720                                 INSTALL_PACKAGE_TIMEOUT);
1721         if (rc) {
1722                 rc = -EOPNOTSUPP;
1723                 goto flash_pkg_exit;
1724         }
1725
1726         if (resp->error_code) {
1727                 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
1728
1729                 if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
1730                         install.flags |= cpu_to_le16(
1731                                NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
1732                         rc = _hwrm_send_message(bp, &install, sizeof(install),
1733                                                 INSTALL_PACKAGE_TIMEOUT);
1734                         if (rc) {
1735                                 rc = -EOPNOTSUPP;
1736                                 goto flash_pkg_exit;
1737                         }
1738                 }
1739         }
1740
1741         if (resp->result) {
1742                 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
1743                            (s8)resp->result, (int)resp->problem_item);
1744                 rc = -ENOPKG;
1745         }
1746 flash_pkg_exit:
1747         mutex_unlock(&bp->hwrm_cmd_lock);
1748         return rc;
1749 }
1750
1751 static int bnxt_flash_device(struct net_device *dev,
1752                              struct ethtool_flash *flash)
1753 {
1754         if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
1755                 netdev_err(dev, "flashdev not supported from a virtual function\n");
1756                 return -EINVAL;
1757         }
1758
1759         if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
1760             flash->region > 0xffff)
1761                 return bnxt_flash_package_from_file(dev, flash->data,
1762                                                     flash->region);
1763
1764         return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
1765 }
1766
1767 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
1768 {
1769         struct bnxt *bp = netdev_priv(dev);
1770         int rc;
1771         struct hwrm_nvm_get_dir_info_input req = {0};
1772         struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
1773
1774         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
1775
1776         mutex_lock(&bp->hwrm_cmd_lock);
1777         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1778         if (!rc) {
1779                 *entries = le32_to_cpu(output->entries);
1780                 *length = le32_to_cpu(output->entry_length);
1781         }
1782         mutex_unlock(&bp->hwrm_cmd_lock);
1783         return rc;
1784 }
1785
1786 static int bnxt_get_eeprom_len(struct net_device *dev)
1787 {
1788         /* The -1 return value allows the entire 32-bit range of offsets to be
1789          * passed via the ethtool command-line utility.
1790          */
1791         return -1;
1792 }
1793
1794 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
1795 {
1796         struct bnxt *bp = netdev_priv(dev);
1797         int rc;
1798         u32 dir_entries;
1799         u32 entry_length;
1800         u8 *buf;
1801         size_t buflen;
1802         dma_addr_t dma_handle;
1803         struct hwrm_nvm_get_dir_entries_input req = {0};
1804
1805         rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
1806         if (rc != 0)
1807                 return rc;
1808
1809         /* Insert 2 bytes of directory info (count and size of entries) */
1810         if (len < 2)
1811                 return -EINVAL;
1812
1813         *data++ = dir_entries;
1814         *data++ = entry_length;
1815         len -= 2;
1816         memset(data, 0xff, len);
1817
1818         buflen = dir_entries * entry_length;
1819         buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
1820                                  GFP_KERNEL);
1821         if (!buf) {
1822                 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1823                            (unsigned int)buflen);
1824                 return -ENOMEM;
1825         }
1826         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
1827         req.host_dest_addr = cpu_to_le64(dma_handle);
1828         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1829         if (rc == 0)
1830                 memcpy(data, buf, len > buflen ? buflen : len);
1831         dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
1832         return rc;
1833 }
1834
1835 static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
1836                                u32 length, u8 *data)
1837 {
1838         struct bnxt *bp = netdev_priv(dev);
1839         int rc;
1840         u8 *buf;
1841         dma_addr_t dma_handle;
1842         struct hwrm_nvm_read_input req = {0};
1843
1844         if (!length)
1845                 return -EINVAL;
1846
1847         buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
1848                                  GFP_KERNEL);
1849         if (!buf) {
1850                 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1851                            (unsigned int)length);
1852                 return -ENOMEM;
1853         }
1854         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
1855         req.host_dest_addr = cpu_to_le64(dma_handle);
1856         req.dir_idx = cpu_to_le16(index);
1857         req.offset = cpu_to_le32(offset);
1858         req.len = cpu_to_le32(length);
1859
1860         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1861         if (rc == 0)
1862                 memcpy(data, buf, length);
1863         dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
1864         return rc;
1865 }
1866
1867 static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1868                                 u16 ext, u16 *index, u32 *item_length,
1869                                 u32 *data_length)
1870 {
1871         struct bnxt *bp = netdev_priv(dev);
1872         int rc;
1873         struct hwrm_nvm_find_dir_entry_input req = {0};
1874         struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
1875
1876         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
1877         req.enables = 0;
1878         req.dir_idx = 0;
1879         req.dir_type = cpu_to_le16(type);
1880         req.dir_ordinal = cpu_to_le16(ordinal);
1881         req.dir_ext = cpu_to_le16(ext);
1882         req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1883         mutex_lock(&bp->hwrm_cmd_lock);
1884         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1885         if (rc == 0) {
1886                 if (index)
1887                         *index = le16_to_cpu(output->dir_idx);
1888                 if (item_length)
1889                         *item_length = le32_to_cpu(output->dir_item_length);
1890                 if (data_length)
1891                         *data_length = le32_to_cpu(output->dir_data_length);
1892         }
1893         mutex_unlock(&bp->hwrm_cmd_lock);
1894         return rc;
1895 }
1896
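     /* The package log is a text blob of newline-separated records whose
      * fields are tab-separated.  Return a pointer to field 'desired_field'
      * of the last record, NUL-terminating fields in place as they are
      * scanned.
      */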
1897 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
1898 {
1899         char    *retval = NULL;
1900         char    *p;
1901         char    *value;
1902         int     field = 0;
1903
1904         if (datalen < 1)
1905                 return NULL;
1906         /* null-terminate the log data (removing last '\n'): */
1907         data[datalen - 1] = 0;
1908         for (p = data; *p != 0; p++) {
1909                 field = 0;
1910                 retval = NULL;
1911                 while (*p != 0 && *p != '\n') {
1912                         value = p;
1913                         while (*p != 0 && *p != '\t' && *p != '\n')
1914                                 p++;
1915                         if (field == desired_field)
1916                                 retval = value;
1917                         if (*p != '\t')
1918                                 break;
1919                         *p = 0;
1920                         field++;
1921                         p++;
1922                 }
1923                 if (*p == 0)
1924                         break;
1925                 *p = 0;
1926         }
1927         return retval;
1928 }
1929
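     /* Read the package log from NVM and, if it contains a numeric package
      * version, append that version to bp->fw_ver_str.
      */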
1930 static void bnxt_get_pkgver(struct net_device *dev)
1931 {
1932         struct bnxt *bp = netdev_priv(dev);
1933         u16 index = 0;
1934         char *pkgver;
1935         u32 pkglen;
1936         u8 *pkgbuf;
1937         int len;
1938
1939         if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1940                                  BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1941                                  &index, NULL, &pkglen) != 0)
1942                 return;
1943
1944         pkgbuf = kzalloc(pkglen, GFP_KERNEL);
1945         if (!pkgbuf) {
1946                 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
1947                         pkglen);
1948                 return;
1949         }
1950
1951         if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
1952                 goto err;
1953
1954         pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
1955                                    pkglen);
1956         if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
1957                 len = strlen(bp->fw_ver_str);
1958                 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
1959                          "/pkg %s", pkgver);
1960         }
1961 err:
1962         kfree(pkgbuf);
1963 }
1964
1965 static int bnxt_get_eeprom(struct net_device *dev,
1966                            struct ethtool_eeprom *eeprom,
1967                            u8 *data)
1968 {
1969         u32 index;
1970         u32 offset;
1971
1972         if (eeprom->offset == 0) /* special offset value to get directory */
1973                 return bnxt_get_nvram_directory(dev, eeprom->len, data);
1974
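             /* The upper 8 bits of the ethtool offset select the 1-based NVM
              * directory index; the lower 24 bits are the byte offset within
              * that directory entry.
              */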
1975         index = eeprom->offset >> 24;
1976         offset = eeprom->offset & 0xffffff;
1977
1978         if (index == 0) {
1979                 netdev_err(dev, "unsupported index value: %d\n", index);
1980                 return -EINVAL;
1981         }
1982
1983         return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
1984 }
1985
1986 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
1987 {
1988         struct bnxt *bp = netdev_priv(dev);
1989         struct hwrm_nvm_erase_dir_entry_input req = {0};
1990
1991         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
1992         req.dir_idx = cpu_to_le16(index);
1993         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1994 }
1995
1996 static int bnxt_set_eeprom(struct net_device *dev,
1997                            struct ethtool_eeprom *eeprom,
1998                            u8 *data)
1999 {
2000         struct bnxt *bp = netdev_priv(dev);
2001         u8 index, dir_op;
2002         u16 type, ext, ordinal, attr;
2003
2004         if (!BNXT_PF(bp)) {
2005                 netdev_err(dev, "NVM write not supported from a virtual function\n");
2006                 return -EINVAL;
2007         }
2008
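             /* The upper 16 bits of the ethtool magic carry the NVM directory
              * entry type.  A type of 0xffff selects a directory operation,
              * with the operation code in bits 15:8 of the magic and the
              * 1-based directory index in bits 7:0.
              */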
2009         type = eeprom->magic >> 16;
2010
2011         if (type == 0xffff) { /* special value for directory operations */
2012                 index = eeprom->magic & 0xff;
2013                 dir_op = eeprom->magic >> 8;
2014                 if (index == 0)
2015                         return -EINVAL;
2016                 switch (dir_op) {
2017                 case 0x0e: /* erase */
2018                         if (eeprom->offset != ~eeprom->magic)
2019                                 return -EINVAL;
2020                         return bnxt_erase_nvram_directory(dev, index - 1);
2021                 default:
2022                         return -EINVAL;
2023                 }
2024         }
2025
2026         /* Create or re-write an NVM item: */
2027         if (bnxt_dir_type_is_executable(type))
2028                 return -EOPNOTSUPP;
2029         ext = eeprom->magic & 0xffff;
2030         ordinal = eeprom->offset >> 16;
2031         attr = eeprom->offset & 0xffff;
2032
2033         return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
2034                                 eeprom->len);
2035 }
2036
2037 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
2038 {
2039         struct bnxt *bp = netdev_priv(dev);
2040         struct ethtool_eee *eee = &bp->eee;
2041         struct bnxt_link_info *link_info = &bp->link_info;
2042         u32 advertising =
2043                  _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
2044         int rc = 0;
2045
2046         if (!BNXT_SINGLE_PF(bp))
2047                 return -EOPNOTSUPP;
2048
2049         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
2050                 return -EOPNOTSUPP;
2051
2052         if (!edata->eee_enabled)
2053                 goto eee_ok;
2054
2055         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2056                 netdev_warn(dev, "EEE requires autoneg\n");
2057                 return -EINVAL;
2058         }
2059         if (edata->tx_lpi_enabled) {
2060                 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
2061                                        edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
2062                         netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
2063                                     bp->lpi_tmr_lo, bp->lpi_tmr_hi);
2064                         return -EINVAL;
2065                 } else if (!bp->lpi_tmr_hi) {
2066                         edata->tx_lpi_timer = eee->tx_lpi_timer;
2067                 }
2068         }
2069         if (!edata->advertised) {
2070                 edata->advertised = advertising & eee->supported;
2071         } else if (edata->advertised & ~advertising) {
2072                 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
2073                             edata->advertised, advertising);
2074                 return -EINVAL;
2075         }
2076
2077         eee->advertised = edata->advertised;
2078         eee->tx_lpi_enabled = edata->tx_lpi_enabled;
2079         eee->tx_lpi_timer = edata->tx_lpi_timer;
2080 eee_ok:
2081         eee->eee_enabled = edata->eee_enabled;
2082
2083         if (netif_running(dev))
2084                 rc = bnxt_hwrm_set_link_setting(bp, false, true);
2085
2086         return rc;
2087 }
2088
2089 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
2090 {
2091         struct bnxt *bp = netdev_priv(dev);
2092
2093         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
2094                 return -EOPNOTSUPP;
2095
2096         *edata = bp->eee;
2097         if (!bp->eee.eee_enabled) {
2098                 /* Preserve tx_lpi_timer so that the last value will be used
2099                  * by default when it is re-enabled.
2100                  */
2101                 edata->advertised = 0;
2102                 edata->tx_lpi_enabled = 0;
2103         }
2104
2105         if (!bp->eee.eee_active)
2106                 edata->lp_advertised = 0;
2107
2108         return 0;
2109 }
2110
2111 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
2112                                             u16 page_number, u16 start_addr,
2113                                             u16 data_length, u8 *buf)
2114 {
2115         struct hwrm_port_phy_i2c_read_input req = {0};
2116         struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
2117         int rc, byte_offset = 0;
2118
2119         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
2120         req.i2c_slave_addr = i2c_addr;
2121         req.page_number = cpu_to_le16(page_number);
2122         req.port_id = cpu_to_le16(bp->pf.port_id);
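             /* The firmware returns at most BNXT_MAX_PHY_I2C_RESP_SIZE bytes
              * per request, so read the EEPROM in chunks.
              */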
2123         do {
2124                 u16 xfer_size;
2125
2126                 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
2127                 data_length -= xfer_size;
2128                 req.page_offset = cpu_to_le16(start_addr + byte_offset);
2129                 req.data_length = xfer_size;
2130                 req.enables = cpu_to_le32(start_addr + byte_offset ?
2131                                  PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
2132                 mutex_lock(&bp->hwrm_cmd_lock);
2133                 rc = _hwrm_send_message(bp, &req, sizeof(req),
2134                                         HWRM_CMD_TIMEOUT);
2135                 if (!rc)
2136                         memcpy(buf + byte_offset, output->data, xfer_size);
2137                 mutex_unlock(&bp->hwrm_cmd_lock);
2138                 byte_offset += xfer_size;
2139         } while (!rc && data_length > 0);
2140
2141         return rc;
2142 }
2143
2144 static int bnxt_get_module_info(struct net_device *dev,
2145                                 struct ethtool_modinfo *modinfo)
2146 {
2147         struct bnxt *bp = netdev_priv(dev);
2148         struct hwrm_port_phy_i2c_read_input req = {0};
2149         struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
2150         int rc;
2151
2152         /* No point in going further if the PHY status indicates that
2153          * the module is not inserted, is powered down, or is a
2154          * 10GBase-T type.
2155          */
2156         if (bp->link_info.module_status >
2157                 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
2158                 return -EOPNOTSUPP;
2159
2160         /* This feature is not supported in older firmware versions */
2161         if (bp->hwrm_spec_code < 0x10202)
2162                 return -EOPNOTSUPP;
2163
2164         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
2165         req.i2c_slave_addr = I2C_DEV_ADDR_A0;
2166         req.page_number = 0;
2167         req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
2168         req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
2169         req.port_id = cpu_to_le16(bp->pf.port_id);
2170         mutex_lock(&bp->hwrm_cmd_lock);
2171         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2172         if (!rc) {
2173                 u32 module_id = le32_to_cpu(output->data[0]);
2174
2175                 switch (module_id) {
2176                 case SFF_MODULE_ID_SFP:
2177                         modinfo->type = ETH_MODULE_SFF_8472;
2178                         modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
2179                         break;
2180                 case SFF_MODULE_ID_QSFP:
2181                 case SFF_MODULE_ID_QSFP_PLUS:
2182                         modinfo->type = ETH_MODULE_SFF_8436;
2183                         modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2184                         break;
2185                 case SFF_MODULE_ID_QSFP28:
2186                         modinfo->type = ETH_MODULE_SFF_8636;
2187                         modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2188                         break;
2189                 default:
2190                         rc = -EOPNOTSUPP;
2191                         break;
2192                 }
2193         }
2194         mutex_unlock(&bp->hwrm_cmd_lock);
2195         return rc;
2196 }
2197
2198 static int bnxt_get_module_eeprom(struct net_device *dev,
2199                                   struct ethtool_eeprom *eeprom,
2200                                   u8 *data)
2201 {
2202         struct bnxt *bp = netdev_priv(dev);
2203         u16  start = eeprom->offset, length = eeprom->len;
2204         int rc = 0;
2205
2206         memset(data, 0, eeprom->len);
2207
2208         /* Read A0 portion of the EEPROM */
2209         if (start < ETH_MODULE_SFF_8436_LEN) {
2210                 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
2211                         length = ETH_MODULE_SFF_8436_LEN - start;
2212                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
2213                                                       start, length, data);
2214                 if (rc)
2215                         return rc;
2216                 start += length;
2217                 data += length;
2218                 length = eeprom->len - length;
2219         }
2220
2221         /* Read A2 portion of the EEPROM */
2222         if (length) {
2223                 start -= ETH_MODULE_SFF_8436_LEN;
2224                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
2225                                                       start, length, data);
2226         }
2227         return rc;
2228 }
2229
2230 static int bnxt_nway_reset(struct net_device *dev)
2231 {
2232         int rc = 0;
2233
2234         struct bnxt *bp = netdev_priv(dev);
2235         struct bnxt_link_info *link_info = &bp->link_info;
2236
2237         if (!BNXT_SINGLE_PF(bp))
2238                 return -EOPNOTSUPP;
2239
2240         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
2241                 return -EINVAL;
2242
2243         if (netif_running(dev))
2244                 rc = bnxt_hwrm_set_link_setting(bp, true, false);
2245
2246         return rc;
2247 }
2248
2249 static int bnxt_set_phys_id(struct net_device *dev,
2250                             enum ethtool_phys_id_state state)
2251 {
2252         struct hwrm_port_led_cfg_input req = {0};
2253         struct bnxt *bp = netdev_priv(dev);
2254         struct bnxt_pf_info *pf = &bp->pf;
2255         struct bnxt_led_cfg *led_cfg;
2256         u8 led_state;
2257         __le16 duration;
2258         int i, rc;
2259
2260         if (!bp->num_leds || BNXT_VF(bp))
2261                 return -EOPNOTSUPP;
2262
2263         if (state == ETHTOOL_ID_ACTIVE) {
2264                 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
2265                 duration = cpu_to_le16(500);
2266         } else if (state == ETHTOOL_ID_INACTIVE) {
2267                 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
2268                 duration = cpu_to_le16(0);
2269         } else {
2270                 return -EINVAL;
2271         }
2272         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
2273         req.port_id = cpu_to_le16(pf->port_id);
2274         req.num_leds = bp->num_leds;
2275         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2276         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2277                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2278                 led_cfg->led_id = bp->leds[i].led_id;
2279                 led_cfg->led_state = led_state;
2280                 led_cfg->led_blink_on = duration;
2281                 led_cfg->led_blink_off = duration;
2282                 led_cfg->led_group_id = bp->leds[i].led_group_id;
2283         }
2284         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2285         if (rc)
2286                 rc = -EIO;
2287         return rc;
2288 }
2289
2290 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
2291 {
2292         struct hwrm_selftest_irq_input req = {0};
2293
2294         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
2295         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2296 }
2297
2298 static int bnxt_test_irq(struct bnxt *bp)
2299 {
2300         int i;
2301
2302         for (i = 0; i < bp->cp_nr_rings; i++) {
2303                 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
2304                 int rc;
2305
2306                 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
2307                 if (rc)
2308                         return rc;
2309         }
2310         return 0;
2311 }
2312
2313 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
2314 {
2315         struct hwrm_port_mac_cfg_input req = {0};
2316
2317         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
2318
2319         req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
2320         if (enable)
2321                 req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
2322         else
2323                 req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
2324         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2325 }
2326
2327 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
2328                                     struct hwrm_port_phy_cfg_input *req)
2329 {
2330         struct bnxt_link_info *link_info = &bp->link_info;
2331         u16 fw_advertising = link_info->advertising;
2332         u16 fw_speed;
2333         int rc;
2334
2335         if (!link_info->autoneg)
2336                 return 0;
2337
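             /* Loopback needs a forced speed: use the current link speed if
              * the carrier is up, otherwise pick a speed from the advertised
              * mask, falling back to 1 Gbps.
              */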
2338         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
2339         if (netif_carrier_ok(bp->dev))
2340                 fw_speed = bp->link_info.link_speed;
2341         else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
2342                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
2343         else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
2344                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
2345         else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
2346                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
2347         else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
2348                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
2349
2350         req->force_link_speed = cpu_to_le16(fw_speed);
2351         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
2352                                   PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2353         rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
2354         req->flags = 0;
2355         req->force_link_speed = cpu_to_le16(0);
2356         return rc;
2357 }
2358
2359 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
2360 {
2361         struct hwrm_port_phy_cfg_input req = {0};
2362
2363         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
2364
2365         if (enable) {
2366                 bnxt_disable_an_for_lpbk(bp, &req);
2367                 req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
2368         } else {
2369                 req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
2370         }
2371         req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
2372         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2373 }
2374
2375 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
2376                             u32 raw_cons, int pkt_size)
2377 {
2378         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2379         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2380         struct bnxt_sw_rx_bd *rx_buf;
2381         struct rx_cmp *rxcmp;
2382         u16 cp_cons, cons;
2383         u8 *data;
2384         u32 len;
2385         int i;
2386
2387         cp_cons = RING_CMP(raw_cons);
2388         rxcmp = (struct rx_cmp *)
2389                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2390         cons = rxcmp->rx_cmp_opaque;
2391         rx_buf = &rxr->rx_buf_ring[cons];
2392         data = rx_buf->data_ptr;
2393         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
2394         if (len != pkt_size)
2395                 return -EIO;
2396         i = ETH_ALEN;
2397         if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
2398                 return -EIO;
2399         i += ETH_ALEN;
2400         for (  ; i < pkt_size; i++) {
2401                 if (data[i] != (u8)(i & 0xff))
2402                         return -EIO;
2403         }
2404         return 0;
2405 }
2406
2407 static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
2408 {
2409         struct bnxt_napi *bnapi = bp->bnapi[0];
2410         struct bnxt_cp_ring_info *cpr;
2411         struct tx_cmp *txcmp;
2412         int rc = -EIO;
2413         u32 raw_cons;
2414         u32 cons;
2415         int i;
2416
2417         cpr = &bnapi->cp_ring;
2418         raw_cons = cpr->cp_raw_cons;
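             /* Poll the completion ring for up to roughly 1 ms (200 * 5 us)
              * waiting for the looped-back packet's RX completion.
              */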
2419         for (i = 0; i < 200; i++) {
2420                 cons = RING_CMP(raw_cons);
2421                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2422
2423                 if (!TX_CMP_VALID(txcmp, raw_cons)) {
2424                         udelay(5);
2425                         continue;
2426                 }
2427
2428                 /* The entry's valid bit must be checked before reading
2429                  * any further fields of the completion.
2430                  */
2431                 dma_rmb();
2432                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
2433                         rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
2434                         raw_cons = NEXT_RAW_CMP(raw_cons);
2435                         raw_cons = NEXT_RAW_CMP(raw_cons);
2436                         break;
2437                 }
2438                 raw_cons = NEXT_RAW_CMP(raw_cons);
2439         }
2440         cpr->cp_raw_cons = raw_cons;
2441         return rc;
2442 }
2443
2444 static int bnxt_run_loopback(struct bnxt *bp)
2445 {
2446         struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
2447         int pkt_size, i = 0;
2448         struct sk_buff *skb;
2449         dma_addr_t map;
2450         u8 *data;
2451         int rc;
2452
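             /* Build a minimal test frame: broadcast destination address,
              * our own MAC address as the source, then an incrementing byte
              * pattern that bnxt_rx_loopback() verifies on receive.
              */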
2453         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
2454         skb = netdev_alloc_skb(bp->dev, pkt_size);
2455         if (!skb)
2456                 return -ENOMEM;
2457         data = skb_put(skb, pkt_size);
2458         eth_broadcast_addr(data);
2459         i += ETH_ALEN;
2460         ether_addr_copy(&data[i], bp->dev->dev_addr);
2461         i += ETH_ALEN;
2462         for ( ; i < pkt_size; i++)
2463                 data[i] = (u8)(i & 0xff);
2464
2465         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
2466                              PCI_DMA_TODEVICE);
2467         if (dma_mapping_error(&bp->pdev->dev, map)) {
2468                 dev_kfree_skb(skb);
2469                 return -EIO;
2470         }
2471         bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
2472
2473         /* Sync BD data before updating doorbell */
2474         wmb();
2475
2476         bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | txr->tx_prod);
2477         rc = bnxt_poll_loopback(bp, pkt_size);
2478
2479         dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
2480         dev_kfree_skb(skb);
2481         return rc;
2482 }
2483
2484 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
2485 {
2486         struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
2487         struct hwrm_selftest_exec_input req = {0};
2488         int rc;
2489
2490         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
2491         mutex_lock(&bp->hwrm_cmd_lock);
2492         resp->test_success = 0;
2493         req.flags = test_mask;
2494         rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
2495         *test_results = resp->test_success;
2496         mutex_unlock(&bp->hwrm_cmd_lock);
2497         return rc;
2498 }
2499
2500 #define BNXT_DRV_TESTS                  3
2501 #define BNXT_MACLPBK_TEST_IDX           (bp->num_tests - BNXT_DRV_TESTS)
2502 #define BNXT_PHYLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 1)
2503 #define BNXT_IRQ_TEST_IDX               (BNXT_MACLPBK_TEST_IDX + 2)
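     /* The last BNXT_DRV_TESTS entries of the ethtool self-test results are
      * driver-implemented tests (MAC loopback, PHY loopback, IRQ) appended
      * after the firmware-reported tests.
      */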
2504
2505 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
2506                            u64 *buf)
2507 {
2508         struct bnxt *bp = netdev_priv(dev);
2509         bool offline = false;
2510         u8 test_results = 0;
2511         u8 test_mask = 0;
2512         int rc, i;
2513
2514         if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
2515                 return;
2516         memset(buf, 0, sizeof(u64) * bp->num_tests);
2517         if (!netif_running(dev)) {
2518                 etest->flags |= ETH_TEST_FL_FAILED;
2519                 return;
2520         }
2521
2522         if (etest->flags & ETH_TEST_FL_OFFLINE) {
2523                 if (bp->pf.active_vfs) {
2524                         etest->flags |= ETH_TEST_FL_FAILED;
2525                         netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
2526                         return;
2527                 }
2528                 offline = true;
2529         }
2530
2531         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
2532                 u8 bit_val = 1 << i;
2533
2534                 if (!(bp->test_info->offline_mask & bit_val))
2535                         test_mask |= bit_val;
2536                 else if (offline)
2537                         test_mask |= bit_val;
2538         }
2539         if (!offline) {
2540                 bnxt_run_fw_tests(bp, test_mask, &test_results);
2541         } else {
2542                 rc = bnxt_close_nic(bp, false, false);
2543                 if (rc)
2544                         return;
2545                 bnxt_run_fw_tests(bp, test_mask, &test_results);
2546
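                     /* Mark the MAC loopback test as failed up front; it is
                      * cleared below only if the loopback frame is received
                      * intact.
                      */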
2547                 buf[BNXT_MACLPBK_TEST_IDX] = 1;
2548                 bnxt_hwrm_mac_loopback(bp, true);
2549                 msleep(250);
2550                 rc = bnxt_half_open_nic(bp);
2551                 if (rc) {
2552                         bnxt_hwrm_mac_loopback(bp, false);
2553                         etest->flags |= ETH_TEST_FL_FAILED;
2554                         return;
2555                 }
2556                 if (bnxt_run_loopback(bp))
2557                         etest->flags |= ETH_TEST_FL_FAILED;
2558                 else
2559                         buf[BNXT_MACLPBK_TEST_IDX] = 0;
2560
2561                 bnxt_hwrm_mac_loopback(bp, false);
2562                 bnxt_hwrm_phy_loopback(bp, true);
2563                 msleep(1000);
2564                 if (bnxt_run_loopback(bp)) {
2565                         buf[BNXT_PHYLPBK_TEST_IDX] = 1;
2566                         etest->flags |= ETH_TEST_FL_FAILED;
2567                 }
2568                 bnxt_hwrm_phy_loopback(bp, false);
2569                 bnxt_half_close_nic(bp);
2570                 bnxt_open_nic(bp, false, true);
2571         }
2572         if (bnxt_test_irq(bp)) {
2573                 buf[BNXT_IRQ_TEST_IDX] = 1;
2574                 etest->flags |= ETH_TEST_FL_FAILED;
2575         }
2576         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
2577                 u8 bit_val = 1 << i;
2578
2579                 if ((test_mask & bit_val) && !(test_results & bit_val)) {
2580                         buf[i] = 1;
2581                         etest->flags |= ETH_TEST_FL_FAILED;
2582                 }
2583         }
2584 }
2585
2586 static int bnxt_reset(struct net_device *dev, u32 *flags)
2587 {
2588         struct bnxt *bp = netdev_priv(dev);
2589         int rc = 0;
2590
2591         if (!BNXT_PF(bp)) {
2592                 netdev_err(dev, "Reset is not supported from a VF\n");
2593                 return -EOPNOTSUPP;
2594         }
2595
2596         if (pci_vfs_assigned(bp->pdev)) {
2597                 netdev_err(dev,
2598                            "Reset not allowed when VFs are assigned to VMs\n");
2599                 return -EBUSY;
2600         }
2601
2602         if (*flags == ETH_RESET_ALL) {
2603                 /* This feature is not supported in older firmware versions */
2604                 if (bp->hwrm_spec_code < 0x10803)
2605                         return -EOPNOTSUPP;
2606
2607                 rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
2608                 if (!rc) {
2609                         netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
2610                         *flags = 0;
2611                 }
2612         } else if (*flags == ETH_RESET_AP) {
2613                 /* This feature is not supported in older firmware versions */
2614                 if (bp->hwrm_spec_code < 0x10803)
2615                         return -EOPNOTSUPP;
2616
2617                 rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
2618                 if (!rc) {
2619                         netdev_info(dev, "Reset Application Processor request successful.\n");
2620                         *flags = 0;
2621                 }
2622         } else {
2623                 rc = -EINVAL;
2624         }
2625
2626         return rc;
2627 }
2628
2629 void bnxt_ethtool_init(struct bnxt *bp)
2630 {
2631         struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
2632         struct hwrm_selftest_qlist_input req = {0};
2633         struct bnxt_test_info *test_info;
2634         struct net_device *dev = bp->dev;
2635         int i, rc;
2636
2637         bnxt_get_pkgver(dev);
2638
2639         if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
2640                 return;
2641
2642         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
2643         mutex_lock(&bp->hwrm_cmd_lock);
2644         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2645         if (rc)
2646                 goto ethtool_init_exit;
2647
2648         test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
2649         if (!test_info)
2650                 goto ethtool_init_exit;
2651
2652         bp->test_info = test_info;
2653         bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
2654         if (bp->num_tests > BNXT_MAX_TEST)
2655                 bp->num_tests = BNXT_MAX_TEST;
2656
2657         test_info->offline_mask = resp->offline_tests;
2658         test_info->timeout = le16_to_cpu(resp->test_timeout);
2659         if (!test_info->timeout)
2660                 test_info->timeout = HWRM_CMD_TIMEOUT;
2661         for (i = 0; i < bp->num_tests; i++) {
2662                 char *str = test_info->string[i];
2663                 char *fw_str = resp->test0_name + i * 32;
2664
2665                 if (i == BNXT_MACLPBK_TEST_IDX) {
2666                         strcpy(str, "Mac loopback test (offline)");
2667                 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
2668                         strcpy(str, "Phy loopback test (offline)");
2669                 } else if (i == BNXT_IRQ_TEST_IDX) {
2670                         strcpy(str, "Interrupt_test (offline)");
2671                 } else {
2672                         strlcpy(str, fw_str, ETH_GSTRING_LEN);
2673                         strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
2674                         if (test_info->offline_mask & (1 << i))
2675                                 strncat(str, " (offline)",
2676                                         ETH_GSTRING_LEN - strlen(str));
2677                         else
2678                                 strncat(str, " (online)",
2679                                         ETH_GSTRING_LEN - strlen(str));
2680                 }
2681         }
2682
2683 ethtool_init_exit:
2684         mutex_unlock(&bp->hwrm_cmd_lock);
2685 }
2686
2687 void bnxt_ethtool_free(struct bnxt *bp)
2688 {
2689         kfree(bp->test_info);
2690         bp->test_info = NULL;
2691 }
2692
2693 const struct ethtool_ops bnxt_ethtool_ops = {
2694         .get_link_ksettings     = bnxt_get_link_ksettings,
2695         .set_link_ksettings     = bnxt_set_link_ksettings,
2696         .get_pauseparam         = bnxt_get_pauseparam,
2697         .set_pauseparam         = bnxt_set_pauseparam,
2698         .get_drvinfo            = bnxt_get_drvinfo,
2699         .get_wol                = bnxt_get_wol,
2700         .set_wol                = bnxt_set_wol,
2701         .get_coalesce           = bnxt_get_coalesce,
2702         .set_coalesce           = bnxt_set_coalesce,
2703         .get_msglevel           = bnxt_get_msglevel,
2704         .set_msglevel           = bnxt_set_msglevel,
2705         .get_sset_count         = bnxt_get_sset_count,
2706         .get_strings            = bnxt_get_strings,
2707         .get_ethtool_stats      = bnxt_get_ethtool_stats,
2708         .set_ringparam          = bnxt_set_ringparam,
2709         .get_ringparam          = bnxt_get_ringparam,
2710         .get_channels           = bnxt_get_channels,
2711         .set_channels           = bnxt_set_channels,
2712         .get_rxnfc              = bnxt_get_rxnfc,
2713         .set_rxnfc              = bnxt_set_rxnfc,
2714         .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
2715         .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
2716         .get_rxfh               = bnxt_get_rxfh,
2717         .flash_device           = bnxt_flash_device,
2718         .get_eeprom_len         = bnxt_get_eeprom_len,
2719         .get_eeprom             = bnxt_get_eeprom,
2720         .set_eeprom             = bnxt_set_eeprom,
2721         .get_link               = bnxt_get_link,
2722         .get_eee                = bnxt_get_eee,
2723         .set_eee                = bnxt_set_eee,
2724         .get_module_info        = bnxt_get_module_info,
2725         .get_module_eeprom      = bnxt_get_module_eeprom,
2726         .nway_reset             = bnxt_nway_reset,
2727         .set_phys_id            = bnxt_set_phys_id,
2728         .self_test              = bnxt_self_test,
2729         .reset                  = bnxt_reset,
2730 };