1/* bnx2x_ethtool.c: Broadcom Everest network driver.
2 *
 3 * Copyright (c) 2007-2013 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/ethtool.h>
21#include <linux/netdevice.h>
22#include <linux/types.h>
23#include <linux/sched.h>
24#include <linux/crc32.h>
25#include "bnx2x.h"
26#include "bnx2x_cmn.h"
27#include "bnx2x_dump.h"
 28#include "bnx2x_init.h"
 29
30/* Note: in the format strings below %s is replaced by the queue-name which is
31 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
32 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
33 */
34#define MAX_QUEUE_NAME_LEN 4
35static const struct {
36 long offset;
37 int size;
38 char string[ETH_GSTRING_LEN];
39} bnx2x_q_stats_arr[] = {
40/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
41 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
42 8, "[%s]: rx_ucast_packets" },
43 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
44 8, "[%s]: rx_mcast_packets" },
45 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
46 8, "[%s]: rx_bcast_packets" },
47 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
48 { Q_STATS_OFFSET32(rx_err_discard_pkt),
49 4, "[%s]: rx_phy_ip_err_discards"},
50 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
51 4, "[%s]: rx_skb_alloc_discard" },
52 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
53
54 { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
55/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
56 8, "[%s]: tx_ucast_packets" },
57 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
58 8, "[%s]: tx_mcast_packets" },
59 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
60 8, "[%s]: tx_bcast_packets" },
61 { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
62 8, "[%s]: tpa_aggregations" },
63 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
64 8, "[%s]: tpa_aggregated_frames"},
65 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
66 { Q_STATS_OFFSET32(driver_filtered_tx_pkt),
67 4, "[%s]: driver_filtered_tx_pkt" }
68};
69
70#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
71
72static const struct {
73 long offset;
74 int size;
75 u32 flags;
76#define STATS_FLAGS_PORT 1
77#define STATS_FLAGS_FUNC 2
78#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
79 char string[ETH_GSTRING_LEN];
80} bnx2x_stats_arr[] = {
81/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
82 8, STATS_FLAGS_BOTH, "rx_bytes" },
83 { STATS_OFFSET32(error_bytes_received_hi),
84 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
85 { STATS_OFFSET32(total_unicast_packets_received_hi),
86 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
87 { STATS_OFFSET32(total_multicast_packets_received_hi),
88 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
89 { STATS_OFFSET32(total_broadcast_packets_received_hi),
90 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
91 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
92 8, STATS_FLAGS_PORT, "rx_crc_errors" },
93 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
94 8, STATS_FLAGS_PORT, "rx_align_errors" },
95 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
96 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
97 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
98 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
99/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
100 8, STATS_FLAGS_PORT, "rx_fragments" },
101 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
102 8, STATS_FLAGS_PORT, "rx_jabbers" },
103 { STATS_OFFSET32(no_buff_discard_hi),
104 8, STATS_FLAGS_BOTH, "rx_discards" },
105 { STATS_OFFSET32(mac_filter_discard),
106 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
107 { STATS_OFFSET32(mf_tag_discard),
108 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
109 { STATS_OFFSET32(pfc_frames_received_hi),
110 8, STATS_FLAGS_PORT, "pfc_frames_received" },
111 { STATS_OFFSET32(pfc_frames_sent_hi),
112 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
113 { STATS_OFFSET32(brb_drop_hi),
114 8, STATS_FLAGS_PORT, "rx_brb_discard" },
115 { STATS_OFFSET32(brb_truncate_hi),
116 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
117 { STATS_OFFSET32(pause_frames_received_hi),
118 8, STATS_FLAGS_PORT, "rx_pause_frames" },
119 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
120 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
121 { STATS_OFFSET32(nig_timer_max),
122 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
123/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
124 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
125 { STATS_OFFSET32(rx_skb_alloc_failed),
126 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
127 { STATS_OFFSET32(hw_csum_err),
128 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
129
130 { STATS_OFFSET32(total_bytes_transmitted_hi),
131 8, STATS_FLAGS_BOTH, "tx_bytes" },
132 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
133 8, STATS_FLAGS_PORT, "tx_error_bytes" },
134 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
135 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
136 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
137 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
138 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
139 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
140 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
141 8, STATS_FLAGS_PORT, "tx_mac_errors" },
142 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
143 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
144/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
145 8, STATS_FLAGS_PORT, "tx_single_collisions" },
146 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
147 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
148 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
149 8, STATS_FLAGS_PORT, "tx_deferred" },
150 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
151 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
152 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
153 8, STATS_FLAGS_PORT, "tx_late_collisions" },
154 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
155 8, STATS_FLAGS_PORT, "tx_total_collisions" },
156 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
157 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
158 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
159 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
160 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
161 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
162 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
163 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
164/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
165 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
166 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
167 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
168 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
169 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
170 { STATS_OFFSET32(pause_frames_sent_hi),
171 8, STATS_FLAGS_PORT, "tx_pause_frames" },
172 { STATS_OFFSET32(total_tpa_aggregations_hi),
173 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
174 { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
175 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
176 { STATS_OFFSET32(total_tpa_bytes_hi),
177 8, STATS_FLAGS_FUNC, "tpa_bytes"},
178 { STATS_OFFSET32(recoverable_error),
179 4, STATS_FLAGS_FUNC, "recoverable_errors" },
180 { STATS_OFFSET32(unrecoverable_error),
181 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
182 { STATS_OFFSET32(driver_filtered_tx_pkt),
183 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
184 { STATS_OFFSET32(eee_tx_lpi),
185 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
186};
187
188#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
 189
190static int bnx2x_get_port_type(struct bnx2x *bp)
191{
192 int port_type;
193 u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
194 switch (bp->link_params.phy[phy_idx].media_type) {
195 case ETH_PHY_SFPP_10G_FIBER:
196 case ETH_PHY_SFP_1G_FIBER:
197 case ETH_PHY_XFP_FIBER:
198 case ETH_PHY_KR:
199 case ETH_PHY_CX4:
200 port_type = PORT_FIBRE;
201 break;
202 case ETH_PHY_DA_TWINAX:
203 port_type = PORT_DA;
204 break;
205 case ETH_PHY_BASE_T:
206 port_type = PORT_TP;
207 break;
208 case ETH_PHY_NOT_PRESENT:
209 port_type = PORT_NONE;
210 break;
211 case ETH_PHY_UNSPECIFIED:
212 default:
213 port_type = PORT_OTHER;
214 break;
215 }
216 return port_type;
217}
 218
219static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
220{
221 struct bnx2x *bp = netdev_priv(dev);
 222 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
 223
224 /* Dual Media boards present all available port types */
225 cmd->supported = bp->port.supported[cfg_idx] |
226 (bp->port.supported[cfg_idx ^ 1] &
227 (SUPPORTED_TP | SUPPORTED_FIBRE));
228 cmd->advertising = bp->port.advertising[cfg_idx];
229 if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
230 ETH_PHY_SFP_1G_FIBER) {
231 cmd->supported &= ~(SUPPORTED_10000baseT_Full);
232 cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
233 }
 234
235 if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
236 !(bp->flags & MF_FUNC_DIS)) {
 237 cmd->duplex = bp->link_vars.duplex;
238
239 if (IS_MF(bp) && !BP_NOMCP(bp))
240 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
241 else
242 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
 243 } else {
244 cmd->duplex = DUPLEX_UNKNOWN;
245 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
 246 }
 247
 248 cmd->port = bnx2x_get_port_type(bp);
 249
250 cmd->phy_address = bp->mdio.prtad;
251 cmd->transceiver = XCVR_INTERNAL;
252
 253 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
254 cmd->autoneg = AUTONEG_ENABLE;
255 else
256 cmd->autoneg = AUTONEG_DISABLE;
257
258 /* Publish LP advertised speeds and FC */
259 if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
260 u32 status = bp->link_vars.link_status;
261
262 cmd->lp_advertising |= ADVERTISED_Autoneg;
263 if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
264 cmd->lp_advertising |= ADVERTISED_Pause;
265 if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
266 cmd->lp_advertising |= ADVERTISED_Asym_Pause;
267
268 if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
269 cmd->lp_advertising |= ADVERTISED_10baseT_Half;
270 if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
271 cmd->lp_advertising |= ADVERTISED_10baseT_Full;
272 if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
273 cmd->lp_advertising |= ADVERTISED_100baseT_Half;
274 if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
275 cmd->lp_advertising |= ADVERTISED_100baseT_Full;
276 if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
277 cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
278 if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
279 cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
280 if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
281 cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
282 if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
283 cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
284 if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
285 cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
286 }
287
288 cmd->maxtxpkt = 0;
289 cmd->maxrxpkt = 0;
290
 291 DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n
292 " supported 0x%x advertising 0x%x speed %u\n"
293 " duplex %d port %d phy_address %d transceiver %d\n"
294 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
295 cmd->cmd, cmd->supported, cmd->advertising,
296 ethtool_cmd_speed(cmd),
297 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
298 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
299
300 return 0;
301}
302
303static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
304{
305 struct bnx2x *bp = netdev_priv(dev);
 306 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
 307 u32 speed, phy_idx;
 308
 309 if (IS_MF_SD(bp))
310 return 0;
311
 312 DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n
 313 " supported 0x%x advertising 0x%x speed %u\n
314 " duplex %d port %d phy_address %d transceiver %d\n"
315 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
316 cmd->cmd, cmd->supported, cmd->advertising,
317 ethtool_cmd_speed(cmd),
318 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
319 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
320
 321 speed = ethtool_cmd_speed(cmd);
 322
 323 /* If we received a request for an unknown duplex, assume full */
324 if (cmd->duplex == DUPLEX_UNKNOWN)
325 cmd->duplex = DUPLEX_FULL;
326
 327 if (IS_MF_SI(bp)) {
 328 u32 part;
329 u32 line_speed = bp->link_vars.line_speed;
330
331 /* use 10G if no link detected */
332 if (!line_speed)
333 line_speed = 10000;
334
335 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
336 DP(BNX2X_MSG_ETHTOOL,
337 "To set speed BC %X or higher is required, please upgrade BC\n",
338 REQ_BC_VER_4_SET_MF_BW);
339 return -EINVAL;
340 }
 341
 342 part = (speed * 100) / line_speed;
 343
 344 if (line_speed < speed || !part) {
345 DP(BNX2X_MSG_ETHTOOL,
346 "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
347 return -EINVAL;
348 }
 349
350 if (bp->state != BNX2X_STATE_OPEN)
351 /* store value for following "load" */
352 bp->pending_max = part;
353 else
354 bnx2x_update_max_mf_config(bp, part);
 355
356 return 0;
357 }
358
359 cfg_idx = bnx2x_get_link_cfg_idx(bp);
360 old_multi_phy_config = bp->link_params.multi_phy_config;
361 switch (cmd->port) {
362 case PORT_TP:
363 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
364 break; /* no port change */
365
366 if (!(bp->port.supported[0] & SUPPORTED_TP ||
367 bp->port.supported[1] & SUPPORTED_TP)) {
 368 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
369 return -EINVAL;
370 }
371 bp->link_params.multi_phy_config &=
372 ~PORT_HW_CFG_PHY_SELECTION_MASK;
373 if (bp->link_params.multi_phy_config &
374 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
375 bp->link_params.multi_phy_config |=
376 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
377 else
378 bp->link_params.multi_phy_config |=
379 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
380 break;
381 case PORT_FIBRE:
 382 case PORT_DA:
383 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
384 break; /* no port change */
385
386 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
387 bp->port.supported[1] & SUPPORTED_FIBRE)) {
 388 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
389 return -EINVAL;
390 }
391 bp->link_params.multi_phy_config &=
392 ~PORT_HW_CFG_PHY_SELECTION_MASK;
393 if (bp->link_params.multi_phy_config &
394 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
395 bp->link_params.multi_phy_config |=
396 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
397 else
398 bp->link_params.multi_phy_config |=
399 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
400 break;
401 default:
 402 DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
403 return -EINVAL;
404 }
 405 /* Save the new config in case the command completes successfully */
406 new_multi_phy_config = bp->link_params.multi_phy_config;
407 /* Get the new cfg_idx */
408 cfg_idx = bnx2x_get_link_cfg_idx(bp);
409 /* Restore old config in case command failed */
410 bp->link_params.multi_phy_config = old_multi_phy_config;
 411 DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
 412
 413 if (cmd->autoneg == AUTONEG_ENABLE) {
414 u32 an_supported_speed = bp->port.supported[cfg_idx];
415 if (bp->link_params.phy[EXT_PHY1].type ==
416 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
417 an_supported_speed |= (SUPPORTED_100baseT_Half |
418 SUPPORTED_100baseT_Full);
 419 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
 420 DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
421 return -EINVAL;
422 }
423
424 /* advertise the requested speed and duplex if supported */
 425 if (cmd->advertising & ~an_supported_speed) {
426 DP(BNX2X_MSG_ETHTOOL,
427 "Advertisement parameters are not supported\n");
428 return -EINVAL;
429 }
 430
 431 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
432 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
433 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
 434 cmd->advertising);
435 if (cmd->advertising) {
436
437 bp->link_params.speed_cap_mask[cfg_idx] = 0;
438 if (cmd->advertising & ADVERTISED_10baseT_Half) {
439 bp->link_params.speed_cap_mask[cfg_idx] |=
440 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
441 }
442 if (cmd->advertising & ADVERTISED_10baseT_Full)
443 bp->link_params.speed_cap_mask[cfg_idx] |=
444 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
 445
446 if (cmd->advertising & ADVERTISED_100baseT_Full)
447 bp->link_params.speed_cap_mask[cfg_idx] |=
448 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
449
450 if (cmd->advertising & ADVERTISED_100baseT_Half) {
451 bp->link_params.speed_cap_mask[cfg_idx] |=
452 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
453 }
454 if (cmd->advertising & ADVERTISED_1000baseT_Half) {
455 bp->link_params.speed_cap_mask[cfg_idx] |=
456 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
457 }
458 if (cmd->advertising & (ADVERTISED_1000baseT_Full |
459 ADVERTISED_1000baseKX_Full))
460 bp->link_params.speed_cap_mask[cfg_idx] |=
461 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
462
463 if (cmd->advertising & (ADVERTISED_10000baseT_Full |
464 ADVERTISED_10000baseKX4_Full |
465 ADVERTISED_10000baseKR_Full))
466 bp->link_params.speed_cap_mask[cfg_idx] |=
467 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
468
469 if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
470 bp->link_params.speed_cap_mask[cfg_idx] |=
471 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
 472 }
473 } else { /* forced speed */
474 /* advertise the requested speed and duplex if supported */
 475 switch (speed) {
476 case SPEED_10:
477 if (cmd->duplex == DUPLEX_FULL) {
 478 if (!(bp->port.supported[cfg_idx] &
 479 SUPPORTED_10baseT_Full)) {
 480 DP(BNX2X_MSG_ETHTOOL,
481 "10M full not supported\n");
482 return -EINVAL;
483 }
484
485 advertising = (ADVERTISED_10baseT_Full |
486 ADVERTISED_TP);
487 } else {
 488 if (!(bp->port.supported[cfg_idx] &
 489 SUPPORTED_10baseT_Half)) {
 490 DP(BNX2X_MSG_ETHTOOL,
491 "10M half not supported\n");
492 return -EINVAL;
493 }
494
495 advertising = (ADVERTISED_10baseT_Half |
496 ADVERTISED_TP);
497 }
498 break;
499
500 case SPEED_100:
501 if (cmd->duplex == DUPLEX_FULL) {
 502 if (!(bp->port.supported[cfg_idx] &
 503 SUPPORTED_100baseT_Full)) {
 504 DP(BNX2X_MSG_ETHTOOL,
505 "100M full not supported\n");
506 return -EINVAL;
507 }
508
509 advertising = (ADVERTISED_100baseT_Full |
510 ADVERTISED_TP);
511 } else {
 512 if (!(bp->port.supported[cfg_idx] &
 513 SUPPORTED_100baseT_Half)) {
 514 DP(BNX2X_MSG_ETHTOOL,
515 "100M half not supported\n");
516 return -EINVAL;
517 }
518
519 advertising = (ADVERTISED_100baseT_Half |
520 ADVERTISED_TP);
521 }
522 break;
523
524 case SPEED_1000:
525 if (cmd->duplex != DUPLEX_FULL) {
526 DP(BNX2X_MSG_ETHTOOL,
527 "1G half not supported\n");
528 return -EINVAL;
529 }
530
531 if (!(bp->port.supported[cfg_idx] &
532 SUPPORTED_1000baseT_Full)) {
533 DP(BNX2X_MSG_ETHTOOL,
534 "1G full not supported\n");
535 return -EINVAL;
536 }
537
538 advertising = (ADVERTISED_1000baseT_Full |
539 ADVERTISED_TP);
540 break;
541
542 case SPEED_2500:
543 if (cmd->duplex != DUPLEX_FULL) {
 544 DP(BNX2X_MSG_ETHTOOL,
545 "2.5G half not supported\n");
546 return -EINVAL;
547 }
548
549 if (!(bp->port.supported[cfg_idx]
550 & SUPPORTED_2500baseX_Full)) {
 551 DP(BNX2X_MSG_ETHTOOL,
552 "2.5G full not supported\n");
553 return -EINVAL;
554 }
555
556 advertising = (ADVERTISED_2500baseX_Full |
557 ADVERTISED_TP);
558 break;
559
560 case SPEED_10000:
561 if (cmd->duplex != DUPLEX_FULL) {
562 DP(BNX2X_MSG_ETHTOOL,
563 "10G half not supported\n");
564 return -EINVAL;
565 }
 566 phy_idx = bnx2x_get_cur_phy_idx(bp);
 567 if (!(bp->port.supported[cfg_idx]
568 & SUPPORTED_10000baseT_Full) ||
569 (bp->link_params.phy[phy_idx].media_type ==
570 ETH_PHY_SFP_1G_FIBER)) {
571 DP(BNX2X_MSG_ETHTOOL,
572 "10G full not supported\n");
573 return -EINVAL;
574 }
575
576 advertising = (ADVERTISED_10000baseT_Full |
577 ADVERTISED_FIBRE);
578 break;
579
580 default:
 581 DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
582 return -EINVAL;
583 }
584
585 bp->link_params.req_line_speed[cfg_idx] = speed;
586 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
587 bp->port.advertising[cfg_idx] = advertising;
588 }
589
 590 DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n
 591 " req_duplex %d advertising 0x%x\n",
592 bp->link_params.req_line_speed[cfg_idx],
593 bp->link_params.req_duplex[cfg_idx],
594 bp->port.advertising[cfg_idx]);
 595
596 /* Set new config */
597 bp->link_params.multi_phy_config = new_multi_phy_config;
598 if (netif_running(dev)) {
599 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
600 bnx2x_link_set(bp);
601 }
602
603 return 0;
604}
605
606#define DUMP_ALL_PRESETS 0x1FFF
607#define DUMP_MAX_PRESETS 13
 608
 609static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
610{
611 if (CHIP_IS_E1(bp))
 612 return dump_num_registers[0][preset-1];
 613 else if (CHIP_IS_E1H(bp))
 614 return dump_num_registers[1][preset-1];
 615 else if (CHIP_IS_E2(bp))
 616 return dump_num_registers[2][preset-1];
 617 else if (CHIP_IS_E3A0(bp))
 618 return dump_num_registers[3][preset-1];
 619 else if (CHIP_IS_E3B0(bp))
 620 return dump_num_registers[4][preset-1];
 621 else
622 return 0;
623}
624
625static int __bnx2x_get_regs_len(struct bnx2x *bp)
626{
627 u32 preset_idx;
628 int regdump_len = 0;
629
630 /* Calculate the total preset regs length */
631 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
632 regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
633
634 return regdump_len;
635}
636
637static int bnx2x_get_regs_len(struct net_device *dev)
638{
639 struct bnx2x *bp = netdev_priv(dev);
640 int regdump_len = 0;
641
642 regdump_len = __bnx2x_get_regs_len(bp);
643 regdump_len *= 4;
644 regdump_len += sizeof(struct dump_header);
645
646 return regdump_len;
647}
648
649#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
650#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
651#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
652#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
653#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
654
655#define IS_REG_IN_PRESET(presets, idx) \
656 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
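 /* Preset indices are 1-based: preset N corresponds to bit (N-1) in a
  * register's presets bitmask.
  */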
657
 658/******* Paged registers info selectors ********/
 659static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
660{
661 if (CHIP_IS_E2(bp))
662 return page_vals_e2;
663 else if (CHIP_IS_E3(bp))
664 return page_vals_e3;
665 else
666 return NULL;
667}
668
1191cb83 669static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
0fea29c1
VZ
670{
671 if (CHIP_IS_E2(bp))
672 return PAGE_MODE_VALUES_E2;
673 else if (CHIP_IS_E3(bp))
674 return PAGE_MODE_VALUES_E3;
675 else
676 return 0;
677}
678
1191cb83 679static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
0fea29c1
VZ
680{
681 if (CHIP_IS_E2(bp))
682 return page_write_regs_e2;
683 else if (CHIP_IS_E3(bp))
684 return page_write_regs_e3;
685 else
686 return NULL;
687}
688
1191cb83 689static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
0fea29c1
VZ
690{
691 if (CHIP_IS_E2(bp))
692 return PAGE_WRITE_REGS_E2;
693 else if (CHIP_IS_E3(bp))
694 return PAGE_WRITE_REGS_E3;
695 else
696 return 0;
697}
698
1191cb83 699static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
0fea29c1
VZ
700{
701 if (CHIP_IS_E2(bp))
702 return page_read_regs_e2;
703 else if (CHIP_IS_E3(bp))
704 return page_read_regs_e3;
705 else
706 return NULL;
707}
708
1191cb83 709static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
0fea29c1
VZ
710{
711 if (CHIP_IS_E2(bp))
712 return PAGE_READ_REGS_E2;
713 else if (CHIP_IS_E3(bp))
714 return PAGE_READ_REGS_E3;
715 else
716 return 0;
717}
718
719static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
720 const struct reg_addr *reg_info)
 721{
722 if (CHIP_IS_E1(bp))
723 return IS_E1_REG(reg_info->chips);
724 else if (CHIP_IS_E1H(bp))
725 return IS_E1H_REG(reg_info->chips);
726 else if (CHIP_IS_E2(bp))
727 return IS_E2_REG(reg_info->chips);
728 else if (CHIP_IS_E3A0(bp))
729 return IS_E3A0_REG(reg_info->chips);
730 else if (CHIP_IS_E3B0(bp))
731 return IS_E3B0_REG(reg_info->chips);
732 else
733 return false;
 734}
 735
 736
737static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
738 const struct wreg_addr *wreg_info)
739{
740 if (CHIP_IS_E1(bp))
741 return IS_E1_REG(wreg_info->chips);
742 else if (CHIP_IS_E1H(bp))
743 return IS_E1H_REG(wreg_info->chips);
744 else if (CHIP_IS_E2(bp))
745 return IS_E2_REG(wreg_info->chips);
746 else if (CHIP_IS_E3A0(bp))
747 return IS_E3A0_REG(wreg_info->chips);
748 else if (CHIP_IS_E3B0(bp))
749 return IS_E3B0_REG(wreg_info->chips);
750 else
751 return false;
752}
753
754/**
755 * bnx2x_read_pages_regs - read "paged" registers
756 *
757 * @bp device handle
758 * @p output buffer
759 *
760 * Reads "paged" memories: memories that may only be read by first writing to a
761 * specific address ("write address") and then reading from a specific address
762 * ("read address"). There may be more than one write address per "page" and
763 * more than one read address per write address.
 764 */
07ba6af4 765static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
f2e0899f
DK
766{
767 u32 i, j, k, n;
 768
769 /* addresses of the paged registers */
770 const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
771 /* number of paged registers */
772 int num_pages = __bnx2x_get_page_reg_num(bp);
773 /* write addresses */
774 const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
775 /* number of write addresses */
776 int write_num = __bnx2x_get_page_write_num(bp);
777 /* read addresses info */
778 const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
779 /* number of read addresses */
780 int read_num = __bnx2x_get_page_read_num(bp);
 781 u32 addr, size;
782
783 for (i = 0; i < num_pages; i++) {
784 for (j = 0; j < write_num; j++) {
785 REG_WR(bp, write_addr[j], page_addr[i]);
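 /* The page value written above is now latched; every read address
  * scanned below returns data belonging to page_addr[i].
  */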
786
787 for (k = 0; k < read_num; k++) {
788 if (IS_REG_IN_PRESET(read_addr[k].presets,
789 preset)) {
790 size = read_addr[k].size;
791 for (n = 0; n < size; n++) {
792 addr = read_addr[k].addr + n*4;
793 *p++ = REG_RD(bp, addr);
794 }
795 }
796 }
797 }
798 }
799}
800
07ba6af4 801static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
0fea29c1 802{
07ba6af4
MS
803 u32 i, j, addr;
804 const struct wreg_addr *wreg_addr_p = NULL;
805
806 if (CHIP_IS_E1(bp))
807 wreg_addr_p = &wreg_addr_e1;
808 else if (CHIP_IS_E1H(bp))
809 wreg_addr_p = &wreg_addr_e1h;
810 else if (CHIP_IS_E2(bp))
811 wreg_addr_p = &wreg_addr_e2;
812 else if (CHIP_IS_E3A0(bp))
813 wreg_addr_p = &wreg_addr_e3;
814 else if (CHIP_IS_E3B0(bp))
815 wreg_addr_p = &wreg_addr_e3b0;
816
817 /* Read the idle_chk registers */
818 for (i = 0; i < IDLE_REGS_COUNT; i++) {
819 if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
820 IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
821 for (j = 0; j < idle_reg_addrs[i].size; j++)
822 *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
823 }
824 }
825
826 /* Read the regular registers */
827 for (i = 0; i < REGS_COUNT; i++) {
828 if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
829 IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
830 for (j = 0; j < reg_addrs[i].size; j++)
831 *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
832 }
833 }
834
835 /* Read the CAM registers */
836 if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
837 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
838 for (i = 0; i < wreg_addr_p->size; i++) {
839 *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
840
841 /* In case of wreg_addr register, read additional
842 registers from read_regs array
843 */
844 for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
845 addr = *(wreg_addr_p->read_regs);
846 *p++ = REG_RD(bp, addr + j*4);
847 }
848 }
849 }
850
851 /* Paged registers are supported in E2 & E3 only */
852 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
 853 /* Read "paged" registers */
854 bnx2x_read_pages_regs(bp, p, preset);
855 }
856
857 return 0;
858}
859
860static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
861{
862 u32 preset_idx;
 863
864 /* Read all registers, by reading all preset registers */
865 for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
866 /* Skip presets with IOR */
867 if ((preset_idx == 2) ||
868 (preset_idx == 5) ||
869 (preset_idx == 8) ||
870 (preset_idx == 11))
871 continue;
872 __bnx2x_get_preset_regs(bp, p, preset_idx);
873 p += __bnx2x_get_preset_regs_len(bp, preset_idx);
874 }
875}
876
877static void bnx2x_get_regs(struct net_device *dev,
878 struct ethtool_regs *regs, void *_p)
879{
 880 u32 *p = _p;
 881 struct bnx2x *bp = netdev_priv(dev);
 882 struct dump_header dump_hdr = {0};
 883
 884 regs->version = 2;
885 memset(p, 0, regs->len);
886
887 if (!netif_running(bp->dev))
888 return;
889
 890 /* Disable parity attentions for the duration of the dump, since reading
 891 * never-written registers may cause false alarms. We
 892 * will re-enable parity attentions right after the dump.
893 */
894
895 /* Disable parity on path 0 */
896 bnx2x_pretend_func(bp, 0);
897 bnx2x_disable_blocks_parity(bp);
898
899 /* Disable parity on path 1 */
900 bnx2x_pretend_func(bp, 1);
901 bnx2x_disable_blocks_parity(bp);
 902
903 /* Return to current function */
904 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 905
906 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
907 dump_hdr.preset = DUMP_ALL_PRESETS;
908 dump_hdr.version = BNX2X_DUMP_VERSION;
909
910 /* dump_meta_data presents OR of CHIP and PATH. */
911 if (CHIP_IS_E1(bp)) {
912 dump_hdr.dump_meta_data = DUMP_CHIP_E1;
913 } else if (CHIP_IS_E1H(bp)) {
914 dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
915 } else if (CHIP_IS_E2(bp)) {
916 dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
917 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
918 } else if (CHIP_IS_E3A0(bp)) {
919 dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
920 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
921 } else if (CHIP_IS_E3B0(bp)) {
922 dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
923 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
924 }
925
926 memcpy(p, &dump_hdr, sizeof(struct dump_header));
927 p += dump_hdr.header_size + 1;
 928
929 /* Actually read the registers */
930 __bnx2x_get_regs(bp, p);
931
932 /* Re-enable parity attentions on path 0 */
933 bnx2x_pretend_func(bp, 0);
934 bnx2x_clear_blocks_parity(bp);
935 bnx2x_enable_blocks_parity(bp);
936
937 /* Re-enable parity attentions on path 1 */
938 bnx2x_pretend_func(bp, 1);
 939 bnx2x_clear_blocks_parity(bp);
 940 bnx2x_enable_blocks_parity(bp);
941
942 /* Return to current function */
943 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
944}
945
946static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
947{
948 struct bnx2x *bp = netdev_priv(dev);
949 int regdump_len = 0;
950
951 regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
952 regdump_len *= 4;
953 regdump_len += sizeof(struct dump_header);
954
955 return regdump_len;
956}
957
958static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
959{
960 struct bnx2x *bp = netdev_priv(dev);
961
962 /* Use the ethtool_dump "flag" field as the dump preset index */
963 bp->dump_preset_idx = val->flag;
964 return 0;
965}
966
967static int bnx2x_get_dump_flag(struct net_device *dev,
968 struct ethtool_dump *dump)
969{
970 struct bnx2x *bp = netdev_priv(dev);
971
972 /* Calculate the requested preset idx length */
973 dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
974 DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
975 bp->dump_preset_idx, dump->len);
976
977 dump->flag = ETHTOOL_GET_DUMP_DATA;
978 return 0;
979}
980
981static int bnx2x_get_dump_data(struct net_device *dev,
982 struct ethtool_dump *dump,
983 void *buffer)
984{
985 u32 *p = buffer;
986 struct bnx2x *bp = netdev_priv(dev);
987 struct dump_header dump_hdr = {0};
988
989 memset(p, 0, dump->len);
990
 991 /* Disable parity attentions for the duration of the dump, since reading
 992 * never-written registers may cause false alarms. We
 993 * will re-enable parity attentions right after the dump.
994 */
995
996 /* Disable parity on path 0 */
997 bnx2x_pretend_func(bp, 0);
998 bnx2x_disable_blocks_parity(bp);
999
1000 /* Disable parity on path 1 */
1001 bnx2x_pretend_func(bp, 1);
1002 bnx2x_disable_blocks_parity(bp);
1003
1004 /* Return to current function */
1005 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1006
1007 dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
1008 dump_hdr.preset = bp->dump_preset_idx;
1009 dump_hdr.version = BNX2X_DUMP_VERSION;
1010
1011 DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
1012
1013 /* dump_meta_data presents OR of CHIP and PATH. */
1014 if (CHIP_IS_E1(bp)) {
1015 dump_hdr.dump_meta_data = DUMP_CHIP_E1;
1016 } else if (CHIP_IS_E1H(bp)) {
1017 dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
1018 } else if (CHIP_IS_E2(bp)) {
1019 dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
1020 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
1021 } else if (CHIP_IS_E3A0(bp)) {
1022 dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
1023 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
1024 } else if (CHIP_IS_E3B0(bp)) {
1025 dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
1026 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
1027 }
1028
1029 memcpy(p, &dump_hdr, sizeof(struct dump_header));
1030 p += dump_hdr.header_size + 1;
1031
1032 /* Actually read the registers */
1033 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
1034
1035 /* Re-enable parity attentions on path 0 */
1036 bnx2x_pretend_func(bp, 0);
1037 bnx2x_clear_blocks_parity(bp);
1038 bnx2x_enable_blocks_parity(bp);
1039
1040 /* Re-enable parity attentions on path 1 */
1041 bnx2x_pretend_func(bp, 1);
1042 bnx2x_clear_blocks_parity(bp);
1043 bnx2x_enable_blocks_parity(bp);
1044
1045 /* Return to current function */
1046 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1047
1048 return 0;
1049}
1050
1051static void bnx2x_get_drvinfo(struct net_device *dev,
1052 struct ethtool_drvinfo *info)
1053{
1054 struct bnx2x *bp = netdev_priv(dev);
 1055
1056 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1057 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 1058
1059 bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
1060
 1061 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
 1062 info->n_stats = BNX2X_NUM_STATS;
 1063 info->testinfo_len = BNX2X_NUM_TESTS(bp);
1064 info->eedump_len = bp->common.flash_size;
1065 info->regdump_len = bnx2x_get_regs_len(dev);
1066}
1067
1068static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1069{
1070 struct bnx2x *bp = netdev_priv(dev);
1071
1072 if (bp->flags & NO_WOL_FLAG) {
1073 wol->supported = 0;
1074 wol->wolopts = 0;
1075 } else {
1076 wol->supported = WAKE_MAGIC;
1077 if (bp->wol)
1078 wol->wolopts = WAKE_MAGIC;
1079 else
1080 wol->wolopts = 0;
1081 }
1082 memset(&wol->sopass, 0, sizeof(wol->sopass));
1083}
1084
1085static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1086{
1087 struct bnx2x *bp = netdev_priv(dev);
1088
 1089 if (wol->wolopts & ~WAKE_MAGIC) {
 1090 DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
 1091 return -EINVAL;
 1092 }
1093
1094 if (wol->wolopts & WAKE_MAGIC) {
 1095 if (bp->flags & NO_WOL_FLAG) {
 1096 DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
 1097 return -EINVAL;
 1098 }
1099 bp->wol = 1;
1100 } else
1101 bp->wol = 0;
1102
1103 return 0;
1104}
1105
1106static u32 bnx2x_get_msglevel(struct net_device *dev)
1107{
1108 struct bnx2x *bp = netdev_priv(dev);
1109
1110 return bp->msg_enable;
1111}
1112
1113static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
1114{
1115 struct bnx2x *bp = netdev_priv(dev);
1116
1117 if (capable(CAP_NET_ADMIN)) {
1118 /* dump MCP trace */
 1119 if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
 1120 bnx2x_fw_dump_lvl(bp, KERN_INFO);
 1121 bp->msg_enable = level;
 1122 }
1123}
1124
1125static int bnx2x_nway_reset(struct net_device *dev)
1126{
1127 struct bnx2x *bp = netdev_priv(dev);
1128
1129 if (!bp->port.pmf)
1130 return 0;
1131
1132 if (netif_running(dev)) {
1133 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 1134 bnx2x_force_link_reset(bp);
1135 bnx2x_link_set(bp);
1136 }
1137
1138 return 0;
1139}
1140
1141static u32 bnx2x_get_link(struct net_device *dev)
1142{
1143 struct bnx2x *bp = netdev_priv(dev);
1144
 1145 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
1146 return 0;
1147
1148 return bp->link_vars.link_up;
1149}
1150
1151static int bnx2x_get_eeprom_len(struct net_device *dev)
1152{
1153 struct bnx2x *bp = netdev_priv(dev);
1154
1155 return bp->common.flash_size;
1156}
1157
 1158/* The per-pf misc lock must be acquired before the per-port mcp lock. Otherwise,
 1159 * had we done things the other way around, if two pfs from the same port were to
 1160 * attempt to access nvram at the same time, we could run into a scenario such
 1161 * as:
 1162 * pf A takes the port lock.
 1163 * pf B succeeds in taking the same lock since they are from the same port.
 1164 * pf A takes the per pf misc lock. Performs eeprom access.
 1165 * pf A finishes. Unlocks the per pf misc lock.
 1166 * pf B takes the lock and proceeds to perform its own access.
 1167 * pf A unlocks the per port lock, while pf B is still working (!).
 1168 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 1169 * access corrupted by pf B)
 1170 */
1171static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
1172{
1173 int port = BP_PORT(bp);
1174 int count, i;
1175 u32 val;
1176
1177 /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1178 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
1179
1180 /* adjust timeout for emulation/FPGA */
 1181 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1182 if (CHIP_REV_IS_SLOW(bp))
1183 count *= 100;
1184
1185 /* request access to nvram interface */
1186 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
1187 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1188
1189 for (i = 0; i < count*10; i++) {
1190 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
1191 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
1192 break;
1193
1194 udelay(5);
1195 }
1196
1197 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1198 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1199 "cannot get access to nvram interface\n");
1200 return -EBUSY;
1201 }
1202
1203 return 0;
1204}
1205
1206static int bnx2x_release_nvram_lock(struct bnx2x *bp)
1207{
1208 int port = BP_PORT(bp);
1209 int count, i;
 1210 u32 val;
1211
1212 /* adjust timeout for emulation/FPGA */
 1213 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1214 if (CHIP_REV_IS_SLOW(bp))
1215 count *= 100;
1216
1217 /* relinquish nvram interface */
1218 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
1219 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1220
1221 for (i = 0; i < count*10; i++) {
1222 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
1223 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
1224 break;
1225
1226 udelay(5);
1227 }
1228
1229 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1230 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1231 "cannot free access to nvram interface\n");
1232 return -EBUSY;
1233 }
1234
1235 /* release HW lock: protect against other PFs in PF Direct Assignment */
1236 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
1237 return 0;
1238}
1239
1240static void bnx2x_enable_nvram_access(struct bnx2x *bp)
1241{
1242 u32 val;
1243
1244 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1245
1246 /* enable both bits, even on read */
1247 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1248 (val | MCPR_NVM_ACCESS_ENABLE_EN |
1249 MCPR_NVM_ACCESS_ENABLE_WR_EN));
1250}
1251
1252static void bnx2x_disable_nvram_access(struct bnx2x *bp)
1253{
1254 u32 val;
1255
1256 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1257
1258 /* disable both bits, even after read */
1259 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1260 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1261 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1262}
1263
1264static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
1265 u32 cmd_flags)
1266{
1267 int count, i, rc;
1268 u32 val;
1269
1270 /* build the command word */
1271 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1272
1273 /* need to clear DONE bit separately */
1274 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1275
1276 /* address of the NVRAM to read from */
1277 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
1278 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1279
1280 /* issue a read command */
1281 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1282
1283 /* adjust timeout for emulation/FPGA */
 1284 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1285 if (CHIP_REV_IS_SLOW(bp))
1286 count *= 100;
1287
1288 /* wait for completion */
1289 *ret_val = 0;
1290 rc = -EBUSY;
1291 for (i = 0; i < count; i++) {
1292 udelay(5);
1293 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
1294
1295 if (val & MCPR_NVM_COMMAND_DONE) {
1296 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
1297 /* we read nvram data in cpu order
1298 * but ethtool sees it as an array of bytes
1299 * converting to big-endian will do the work
1300 */
1301 *ret_val = cpu_to_be32(val);
1302 rc = 0;
1303 break;
1304 }
1305 }
1306 if (rc == -EBUSY)
1307 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1308 "nvram read timeout expired\n");
1309 return rc;
1310}
1311
1312static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
1313 int buf_size)
1314{
1315 int rc;
1316 u32 cmd_flags;
1317 __be32 val;
1318
1319 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
 1320 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1321 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
1322 offset, buf_size);
1323 return -EINVAL;
1324 }
1325
1326 if (offset + buf_size > bp->common.flash_size) {
1327 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1328 "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
1329 offset, buf_size, bp->common.flash_size);
1330 return -EINVAL;
1331 }
1332
1333 /* request access to nvram interface */
1334 rc = bnx2x_acquire_nvram_lock(bp);
1335 if (rc)
1336 return rc;
1337
1338 /* enable access to nvram interface */
1339 bnx2x_enable_nvram_access(bp);
1340
1341 /* read the first word(s) */
1342 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1343 while ((buf_size > sizeof(u32)) && (rc == 0)) {
1344 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
1345 memcpy(ret_buf, &val, 4);
1346
1347 /* advance to the next dword */
1348 offset += sizeof(u32);
1349 ret_buf += sizeof(u32);
1350 buf_size -= sizeof(u32);
1351 cmd_flags = 0;
1352 }
1353
1354 if (rc == 0) {
1355 cmd_flags |= MCPR_NVM_COMMAND_LAST;
1356 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
1357 memcpy(ret_buf, &val, 4);
1358 }
1359
1360 /* disable access to nvram interface */
1361 bnx2x_disable_nvram_access(bp);
1362 bnx2x_release_nvram_lock(bp);
1363
1364 return rc;
1365}
1366
1367static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
1368 int buf_size)
1369{
1370 int rc;
1371
1372 rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
1373
1374 if (!rc) {
1375 __be32 *be = (__be32 *)buf;
1376
1377 while ((buf_size -= 4) >= 0)
1378 *buf++ = be32_to_cpu(*be++);
1379 }
1380
1381 return rc;
1382}
1383
1384static int bnx2x_get_eeprom(struct net_device *dev,
1385 struct ethtool_eeprom *eeprom, u8 *eebuf)
1386{
1387 struct bnx2x *bp = netdev_priv(dev);
 1388
1389 if (!netif_running(dev)) {
1390 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1391 "cannot access eeprom when the interface is down\n");
 1392 return -EAGAIN;
 1393 }
 1394
 1395 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n
 1396 " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
1397 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1398 eeprom->len, eeprom->len);
1399
1400 /* parameters already validated in ethtool_get_eeprom */
1401
 1402 return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
1403}
1404
1405static int bnx2x_get_module_eeprom(struct net_device *dev,
1406 struct ethtool_eeprom *ee,
1407 u8 *data)
1408{
1409 struct bnx2x *bp = netdev_priv(dev);
 1410 int rc = -EINVAL, phy_idx;
 1411 u8 *user_data = data;
 1412 unsigned int start_addr = ee->offset, xfer_size = 0;
1413
1414 if (!netif_running(dev)) {
1415 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1416 "cannot access eeprom when the interface is down\n");
1417 return -EAGAIN;
1418 }
1419
1420 phy_idx = bnx2x_get_cur_phy_idx(bp);
669d6996
YR
1421
1422 /* Read A0 section */
1423 if (start_addr < ETH_MODULE_SFF_8079_LEN) {
1424 /* Limit transfer size to the A0 section boundary */
1425 if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
1426 xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
1427 else
1428 xfer_size = ee->len;
1429 bnx2x_acquire_phy_lock(bp);
1430 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1431 &bp->link_params,
1432 I2C_DEV_ADDR_A0,
1433 start_addr,
1434 xfer_size,
1435 user_data);
1436 bnx2x_release_phy_lock(bp);
1437 if (rc) {
1438 DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
1439
1440 return -EINVAL;
1441 }
 1442 user_data += xfer_size;
 1443 start_addr += xfer_size;
1444 }
1445
1446 /* Read A2 section */
1447 if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
1448 (start_addr < ETH_MODULE_SFF_8472_LEN)) {
1449 xfer_size = ee->len - xfer_size;
1450 /* Limit transfer size to the A2 section boundary */
1451 if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
1452 xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
1453 start_addr -= ETH_MODULE_SFF_8079_LEN;
1454 bnx2x_acquire_phy_lock(bp);
1455 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1456 &bp->link_params,
1457 I2C_DEV_ADDR_A2,
1458 start_addr,
1459 xfer_size,
1460 user_data);
1461 bnx2x_release_phy_lock(bp);
1462 if (rc) {
1463 DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
1464 return -EINVAL;
1465 }
1466 }
1467 return rc;
1468}
1469
1470static int bnx2x_get_module_info(struct net_device *dev,
1471 struct ethtool_modinfo *modinfo)
1472{
1473 struct bnx2x *bp = netdev_priv(dev);
1474 int phy_idx, rc;
1475 u8 sff8472_comp, diag_type;
1476
24ea818e 1477 if (!netif_running(dev)) {
 1478 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1479 "cannot access eeprom when the interface is down\n");
1480 return -EAGAIN;
1481 }
 1482 phy_idx = bnx2x_get_cur_phy_idx(bp);
1483 bnx2x_acquire_phy_lock(bp);
1484 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1485 &bp->link_params,
1486 I2C_DEV_ADDR_A0,
1487 SFP_EEPROM_SFF_8472_COMP_ADDR,
1488 SFP_EEPROM_SFF_8472_COMP_SIZE,
1489 &sff8472_comp);
1490 bnx2x_release_phy_lock(bp);
1491 if (rc) {
1492 DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
1493 return -EINVAL;
1494 }
1495
1496 bnx2x_acquire_phy_lock(bp);
1497 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
1498 &bp->link_params,
1499 I2C_DEV_ADDR_A0,
1500 SFP_EEPROM_DIAG_TYPE_ADDR,
1501 SFP_EEPROM_DIAG_TYPE_SIZE,
1502 &diag_type);
1503 bnx2x_release_phy_lock(bp);
1504 if (rc) {
1505 DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
1506 return -EINVAL;
1507 }
1508
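 /* Modules without SFF-8472 support, or whose diagnostics page requires an
  * I2C address change we do not perform, expose only the 256-byte A0 page.
  */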
1509 if (!sff8472_comp ||
1510 (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
1511 modinfo->type = ETH_MODULE_SFF_8079;
1512 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1513 } else {
1514 modinfo->type = ETH_MODULE_SFF_8472;
1515 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
 1516 }
 1517 return 0;
1518}
1519
1520static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
1521 u32 cmd_flags)
1522{
1523 int count, i, rc;
1524
1525 /* build the command word */
1526 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
1527
1528 /* need to clear DONE bit separately */
1529 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1530
1531 /* write the data */
1532 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
1533
1534 /* address of the NVRAM to write to */
1535 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
1536 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1537
1538 /* issue the write command */
1539 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1540
1541 /* adjust timeout for emulation/FPGA */
 1542 count = BNX2X_NVRAM_TIMEOUT_COUNT;
1543 if (CHIP_REV_IS_SLOW(bp))
1544 count *= 100;
1545
1546 /* wait for completion */
1547 rc = -EBUSY;
1548 for (i = 0; i < count; i++) {
1549 udelay(5);
1550 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
1551 if (val & MCPR_NVM_COMMAND_DONE) {
1552 rc = 0;
1553 break;
1554 }
1555 }
1556
1557 if (rc == -EBUSY)
1558 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1559 "nvram write timeout expired\n");
1560 return rc;
1561}
1562
1563#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1564
1565static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
1566 int buf_size)
1567{
1568 int rc;
1569 u32 cmd_flags, align_offset, val;
1570 __be32 val_be;
1571
1572 if (offset + buf_size > bp->common.flash_size) {
1573 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1574 "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
1575 offset, buf_size, bp->common.flash_size);
1576 return -EINVAL;
1577 }
1578
1579 /* request access to nvram interface */
1580 rc = bnx2x_acquire_nvram_lock(bp);
1581 if (rc)
1582 return rc;
1583
1584 /* enable access to nvram interface */
1585 bnx2x_enable_nvram_access(bp);
1586
1587 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1588 align_offset = (offset & ~0x03);
 1589 rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);
1590
1591 if (rc == 0) {
 1592 /* nvram data is returned as an array of bytes
1593 * convert it back to cpu order
1594 */
1595 val = be32_to_cpu(val_be);
1596
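 /* Read-modify-write: clear the byte being written within the aligned
  * dword and merge in the new byte at its offset.
  */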
1597 val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset));
1598 val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset));
1599
1600 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
1601 cmd_flags);
1602 }
1603
1604 /* disable access to nvram interface */
1605 bnx2x_disable_nvram_access(bp);
1606 bnx2x_release_nvram_lock(bp);
1607
1608 return rc;
1609}
1610
1611static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1612 int buf_size)
1613{
1614 int rc;
1615 u32 cmd_flags;
1616 u32 val;
1617 u32 written_so_far;
1618
1619 if (buf_size == 1) /* ethtool */
1620 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
1621
1622 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
 1623 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1624 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
1625 offset, buf_size);
1626 return -EINVAL;
1627 }
1628
1629 if (offset + buf_size > bp->common.flash_size) {
1630 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1631 "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
1632 offset, buf_size, bp->common.flash_size);
1633 return -EINVAL;
1634 }
1635
1636 /* request access to nvram interface */
1637 rc = bnx2x_acquire_nvram_lock(bp);
1638 if (rc)
1639 return rc;
1640
1641 /* enable access to nvram interface */
1642 bnx2x_enable_nvram_access(bp);
1643
1644 written_so_far = 0;
1645 cmd_flags = MCPR_NVM_COMMAND_FIRST;
1646 while ((written_so_far < buf_size) && (rc == 0)) {
1647 if (written_so_far == (buf_size - sizeof(u32)))
1648 cmd_flags |= MCPR_NVM_COMMAND_LAST;
 1649 else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
 1650 cmd_flags |= MCPR_NVM_COMMAND_LAST;
 1651 else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
1652 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1653
1654 memcpy(&val, data_buf, 4);
1655
1656 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
1657
1658 /* advance to the next dword */
1659 offset += sizeof(u32);
1660 data_buf += sizeof(u32);
1661 written_so_far += sizeof(u32);
1662 cmd_flags = 0;
1663 }
1664
1665 /* disable access to nvram interface */
1666 bnx2x_disable_nvram_access(bp);
1667 bnx2x_release_nvram_lock(bp);
1668
1669 return rc;
1670}
1671
1672static int bnx2x_set_eeprom(struct net_device *dev,
1673 struct ethtool_eeprom *eeprom, u8 *eebuf)
1674{
1675 struct bnx2x *bp = netdev_priv(dev);
1676 int port = BP_PORT(bp);
1677 int rc = 0;
 1678 u32 ext_phy_config;
1679 if (!netif_running(dev)) {
1680 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1681 "cannot access eeprom when the interface is down\n");
 1682 return -EAGAIN;
 1683 }
 1684
 1685 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n
 1686 " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
1687 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1688 eeprom->len, eeprom->len);
1689
1690 /* parameters already validated in ethtool_set_eeprom */
1691
1692 /* PHY eeprom can be accessed only by the PMF */
1693 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
1694 !bp->port.pmf) {
1695 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1696 "wrong magic or interface is not pmf\n");
 1697 return -EINVAL;
 1698 }
 1699
1700 ext_phy_config =
1701 SHMEM_RD(bp,
1702 dev_info.port_hw_config[port].external_phy_config);
1703
de0c62db
DK
1704 if (eeprom->magic == 0x50485950) {
1705 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
1706 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1707
1708 bnx2x_acquire_phy_lock(bp);
1709 rc |= bnx2x_link_reset(&bp->link_params,
1710 &bp->link_vars, 0);
e10bc84d 1711 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
de0c62db
DK
1712 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
1713 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1714 MISC_REGISTERS_GPIO_HIGH, port);
1715 bnx2x_release_phy_lock(bp);
1716 bnx2x_link_report(bp);
1717
1718 } else if (eeprom->magic == 0x50485952) {
1719 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
1720 if (bp->state == BNX2X_STATE_OPEN) {
1721 bnx2x_acquire_phy_lock(bp);
1722 rc |= bnx2x_link_reset(&bp->link_params,
1723 &bp->link_vars, 1);
1724
1725 rc |= bnx2x_phy_init(&bp->link_params,
1726 &bp->link_vars);
1727 bnx2x_release_phy_lock(bp);
1728 bnx2x_calc_fc_adv(bp);
1729 }
1730 } else if (eeprom->magic == 0x53985943) {
1731 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
e10bc84d 1732 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
de0c62db 1733 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
de0c62db
DK
1734
1735 /* DSP Remove Download Mode */
1736 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1737 MISC_REGISTERS_GPIO_LOW, port);
1738
1739 bnx2x_acquire_phy_lock(bp);
1740
e10bc84d
YR
1741 bnx2x_sfx7101_sp_sw_reset(bp,
1742 &bp->link_params.phy[EXT_PHY1]);
de0c62db
DK
1743
1744 /* wait 0.5 sec to allow it to run */
1745 msleep(500);
1746 bnx2x_ext_phy_hw_reset(bp, port);
1747 msleep(500);
1748 bnx2x_release_phy_lock(bp);
1749 }
1750 } else
1751 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
1752
1753 return rc;
1754}
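/* Illustrative userspace-side sketch (not part of the driver): the PHY
 * firmware-upgrade magics handled above arrive through the generic
 * ETHTOOL_SEEPROM ioctl.  A rough caller - assuming an AF_INET datagram
 * socket in 'fd', an interface name in 'ifname', and at least one data
 * byte following the request header, as the ethtool core expects - might
 * look like:
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	static int bnx2x_phy_fw_magic(int fd, const char *ifname, __u32 magic)
 *	{
 *		struct ethtool_eeprom *ee = calloc(1, sizeof(*ee) + 1);
 *		struct ifreq ifr;
 *		int rc;
 *
 *		if (!ee)
 *			return -1;
 *		ee->cmd = ETHTOOL_SEEPROM;
 *		ee->magic = magic;
 *		ee->offset = 0;
 *		ee->len = 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)ee;
 *		rc = ioctl(fd, SIOCETHTOOL, &ifr);
 *		free(ee);
 *		return rc;
 *	}
 *
 * Calling it with 0x50485950 ('PHYP') triggers the prepare path above;
 * 0x50485952 ('PHYR') and the completion magic are sent the same way once
 * the firmware image itself has been written.
 */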
f85582f8 1755
de0c62db
DK
1756static int bnx2x_get_coalesce(struct net_device *dev,
1757 struct ethtool_coalesce *coal)
1758{
1759 struct bnx2x *bp = netdev_priv(dev);
1760
1761 memset(coal, 0, sizeof(struct ethtool_coalesce));
1762
1763 coal->rx_coalesce_usecs = bp->rx_ticks;
1764 coal->tx_coalesce_usecs = bp->tx_ticks;
1765
1766 return 0;
1767}
1768
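/* Both coalescing values below are in usec and are silently clamped to
 * BNX2X_MAX_COALESCE_TOUT; if the interface is running, the new values are
 * applied immediately through bnx2x_update_coalesce().
 */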
1769static int bnx2x_set_coalesce(struct net_device *dev,
1770 struct ethtool_coalesce *coal)
1771{
1772 struct bnx2x *bp = netdev_priv(dev);
1773
1774 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
1775 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
1776 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
1777
1778 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
1779 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
1780 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
1781
1782 if (netif_running(dev))
1783 bnx2x_update_coalesce(bp);
1784
1785 return 0;
1786}
1787
1788static void bnx2x_get_ringparam(struct net_device *dev,
1789 struct ethtool_ringparam *ering)
1790{
1791 struct bnx2x *bp = netdev_priv(dev);
1792
1793 ering->rx_max_pending = MAX_RX_AVAIL;
de0c62db 1794
25141580
DK
1795 if (bp->rx_ring_size)
1796 ering->rx_pending = bp->rx_ring_size;
1797 else
c2188952 1798 ering->rx_pending = MAX_RX_AVAIL;
25141580 1799
a3348722 1800 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
de0c62db
DK
1801 ering->tx_pending = bp->tx_ring_size;
1802}
1803
1804static int bnx2x_set_ringparam(struct net_device *dev,
1805 struct ethtool_ringparam *ering)
1806{
1807 struct bnx2x *bp = netdev_priv(dev);
de0c62db 1808
04c46736
YM
1809 DP(BNX2X_MSG_ETHTOOL,
1810 "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
1811 ering->rx_pending, ering->tx_pending);
1812
de0c62db 1813 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580
MS
1814 DP(BNX2X_MSG_ETHTOOL,
1815 "Handling parity error recovery. Try again later\n");
de0c62db
DK
1816 return -EAGAIN;
1817 }
1818
1819 if ((ering->rx_pending > MAX_RX_AVAIL) ||
b3b83c3f
DK
1820 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1821 MIN_RX_SIZE_TPA)) ||
a3348722 1822 (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
51c1a580
MS
1823 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
1824 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
de0c62db 1825 return -EINVAL;
51c1a580 1826 }
de0c62db
DK
1827
1828 bp->rx_ring_size = ering->rx_pending;
1829 bp->tx_ring_size = ering->tx_pending;
1830
a9fccec7 1831 return bnx2x_reload_if_running(dev);
de0c62db
DK
1832}
1833
1834static void bnx2x_get_pauseparam(struct net_device *dev,
1835 struct ethtool_pauseparam *epause)
1836{
1837 struct bnx2x *bp = netdev_priv(dev);
a22f0788 1838 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
9e7e8399
MY
1839 int cfg_reg;
1840
a22f0788
YR
1841 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
1842 BNX2X_FLOW_CTRL_AUTO);
de0c62db 1843
9e7e8399 1844 if (!epause->autoneg)
241fb5d2 1845 cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
9e7e8399
MY
1846 else
1847 cfg_reg = bp->link_params.req_fc_auto_adv;
1848
1849 epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
de0c62db 1850 BNX2X_FLOW_CTRL_RX);
9e7e8399 1851 epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
de0c62db
DK
1852 BNX2X_FLOW_CTRL_TX);
1853
51c1a580 1854 DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
f1deab50 1855 " autoneg %d rx_pause %d tx_pause %d\n",
de0c62db
DK
1856 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1857}
1858
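/* Flow control is a per-port resource: in multi-function mode the request
 * is accepted but deliberately ignored (see the IS_MF() check below), as a
 * single function must not reconfigure pause settings for the whole port.
 */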
1859static int bnx2x_set_pauseparam(struct net_device *dev,
1860 struct ethtool_pauseparam *epause)
1861{
1862 struct bnx2x *bp = netdev_priv(dev);
a22f0788 1863 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
fb3bff17 1864 if (IS_MF(bp))
de0c62db
DK
1865 return 0;
1866
51c1a580 1867 DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
f1deab50 1868 " autoneg %d rx_pause %d tx_pause %d\n",
de0c62db
DK
1869 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1870
a22f0788 1871 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
de0c62db
DK
1872
1873 if (epause->rx_pause)
a22f0788 1874 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
de0c62db
DK
1875
1876 if (epause->tx_pause)
a22f0788 1877 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
de0c62db 1878
a22f0788
YR
1879 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
1880 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
de0c62db
DK
1881
1882 if (epause->autoneg) {
a22f0788 1883 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
51c1a580 1884 DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
de0c62db
DK
1885 return -EINVAL;
1886 }
1887
a22f0788
YR
1888 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
1889 bp->link_params.req_flow_ctrl[cfg_idx] =
1890 BNX2X_FLOW_CTRL_AUTO;
1891 }
ba35a0fd 1892 bp->link_params.req_fc_auto_adv = 0;
5cd75f0c
YR
1893 if (epause->rx_pause)
1894 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
1895
1896 if (epause->tx_pause)
1897 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
ba35a0fd
YR
1898
1899 if (!bp->link_params.req_fc_auto_adv)
1900 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
de0c62db
DK
1901 }
1902
51c1a580 1903 DP(BNX2X_MSG_ETHTOOL,
a22f0788 1904 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
de0c62db
DK
1905
1906 if (netif_running(dev)) {
1907 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1908 bnx2x_link_set(bp);
1909 }
1910
1911 return 0;
1912}
1913
5889335c 1914static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
cf2c1df6
MS
1915 "register_test (offline) ",
1916 "memory_test (offline) ",
1917 "int_loopback_test (offline)",
1918 "ext_loopback_test (offline)",
1919 "nvram_test (online) ",
1920 "interrupt_test (online) ",
1921 "link_test (online) "
de0c62db
DK
1922};
1923
e9939c80
YM
1924static u32 bnx2x_eee_to_adv(u32 eee_adv)
1925{
1926 u32 modes = 0;
1927
1928 if (eee_adv & SHMEM_EEE_100M_ADV)
1929 modes |= ADVERTISED_100baseT_Full;
1930 if (eee_adv & SHMEM_EEE_1G_ADV)
1931 modes |= ADVERTISED_1000baseT_Full;
1932 if (eee_adv & SHMEM_EEE_10G_ADV)
1933 modes |= ADVERTISED_10000baseT_Full;
1934
1935 return modes;
1936}
1937
1938static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
1939{
1940 u32 eee_adv = 0;
1941 if (modes & ADVERTISED_100baseT_Full)
1942 eee_adv |= SHMEM_EEE_100M_ADV;
1943 if (modes & ADVERTISED_1000baseT_Full)
1944 eee_adv |= SHMEM_EEE_1G_ADV;
1945 if (modes & ADVERTISED_10000baseT_Full)
1946 eee_adv |= SHMEM_EEE_10G_ADV;
1947
1948 return eee_adv << shift;
1949}
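/* The two helpers above translate between the SHMEM_EEE_*_ADV bits kept in
 * shmem and the generic ethtool ADVERTISED_* link modes.  For the speeds
 * they cover (100M/1G/10G full duplex) the conversion is lossless, i.e.
 * bnx2x_eee_to_adv(bnx2x_adv_to_eee(modes, 0)) == modes whenever 'modes'
 * contains only those three advertisement bits.
 */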
1950
1951static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1952{
1953 struct bnx2x *bp = netdev_priv(dev);
1954 u32 eee_cfg;
1955
1956 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1957 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1958 return -EOPNOTSUPP;
1959 }
1960
08e9acc2 1961 eee_cfg = bp->link_vars.eee_status;
e9939c80
YM
1962
1963 edata->supported =
1964 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
1965 SHMEM_EEE_SUPPORTED_SHIFT);
1966
1967 edata->advertised =
1968 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
1969 SHMEM_EEE_ADV_STATUS_SHIFT);
1970 edata->lp_advertised =
1971 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
1972 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
1973
1974 /* SHMEM value is in units of 16 usec --> convert it to 1 usec units */
1975 edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
1976
1977 edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
1978 edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
1979 edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
1980
1981 return 0;
1982}
1983
1984static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1985{
1986 struct bnx2x *bp = netdev_priv(dev);
1987 u32 eee_cfg;
1988 u32 advertised;
1989
1990 if (IS_MF(bp))
1991 return 0;
1992
1993 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1994 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1995 return -EOPNOTSUPP;
1996 }
1997
08e9acc2 1998 eee_cfg = bp->link_vars.eee_status;
e9939c80
YM
1999
2000 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
2001 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
2002 return -EOPNOTSUPP;
2003 }
2004
2005 advertised = bnx2x_adv_to_eee(edata->advertised,
2006 SHMEM_EEE_ADV_STATUS_SHIFT);
2007 if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
2008 DP(BNX2X_MSG_ETHTOOL,
efc7ce03 2009 "Direct manipulation of EEE advertisement is not supported\n");
e9939c80
YM
2010 return -EINVAL;
2011 }
2012
2013 if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
2014 DP(BNX2X_MSG_ETHTOOL,
2015 "Maximal Tx Lpi timer supported is %x(u)\n",
2016 EEE_MODE_TIMER_MASK);
2017 return -EINVAL;
2018 }
2019 if (edata->tx_lpi_enabled &&
2020 (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
2021 DP(BNX2X_MSG_ETHTOOL,
2022 "Minimal Tx Lpi timer supported is %d(u)\n",
2023 EEE_MODE_NVRAM_AGGRESSIVE_TIME);
2024 return -EINVAL;
2025 }
2026
2027 /* All is well; apply changes */
2028 if (edata->eee_enabled)
2029 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
2030 else
2031 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
2032
2033 if (edata->tx_lpi_enabled)
2034 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
2035 else
2036 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
2037
2038 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
2039 bp->link_params.eee_mode |= (edata->tx_lpi_timer &
2040 EEE_MODE_TIMER_MASK) |
2041 EEE_MODE_OVERRIDE_NVRAM |
2042 EEE_MODE_OUTPUT_TIME;
2043
2044 /* Restart link to propagate changes */
2045 if (netif_running(dev)) {
2046 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
5d07d868 2047 bnx2x_force_link_reset(bp);
e9939c80
YM
2048 bnx2x_link_set(bp);
2049 }
2050
2051 return 0;
2052}
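/* Usage note (illustrative): the two handlers above back the ETHTOOL_GEEE /
 * ETHTOOL_SEEE ioctls, which is roughly what
 * `ethtool --show-eee <dev>` and
 * `ethtool --set-eee <dev> eee on tx-lpi on tx-timer <usec>` issue.
 * Direct manipulation of the advertised EEE modes is rejected above, so
 * only the enable bits and the LPI timer can be changed at runtime.
 */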
2053
619c5cb6
VZ
2054enum {
2055 BNX2X_CHIP_E1_OFST = 0,
2056 BNX2X_CHIP_E1H_OFST,
2057 BNX2X_CHIP_E2_OFST,
2058 BNX2X_CHIP_E3_OFST,
2059 BNX2X_CHIP_E3B0_OFST,
2060 BNX2X_CHIP_MAX_OFST
2061};
2062
2063#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
2064#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
2065#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
2066#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
2067#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
2068
2069#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
2070#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
2071
de0c62db
DK
2072static int bnx2x_test_registers(struct bnx2x *bp)
2073{
2074 int idx, i, rc = -ENODEV;
619c5cb6 2075 u32 wr_val = 0, hw;
de0c62db
DK
2076 int port = BP_PORT(bp);
2077 static const struct {
619c5cb6 2078 u32 hw;
de0c62db
DK
2079 u32 offset0;
2080 u32 offset1;
2081 u32 mask;
2082 } reg_tbl[] = {
619c5cb6
VZ
2083/* 0 */ { BNX2X_CHIP_MASK_ALL,
2084 BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
2085 { BNX2X_CHIP_MASK_ALL,
2086 DORQ_REG_DB_ADDR0, 4, 0xffffffff },
2087 { BNX2X_CHIP_MASK_E1X,
2088 HC_REG_AGG_INT_0, 4, 0x000003ff },
2089 { BNX2X_CHIP_MASK_ALL,
2090 PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
2091 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
2092 PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
2093 { BNX2X_CHIP_MASK_E3B0,
2094 PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
2095 { BNX2X_CHIP_MASK_ALL,
2096 PRS_REG_CID_PORT_0, 4, 0x00ffffff },
2097 { BNX2X_CHIP_MASK_ALL,
2098 PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
2099 { BNX2X_CHIP_MASK_ALL,
2100 PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
2101 { BNX2X_CHIP_MASK_ALL,
2102 PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
2103/* 10 */ { BNX2X_CHIP_MASK_ALL,
2104 PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
2105 { BNX2X_CHIP_MASK_ALL,
2106 PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
2107 { BNX2X_CHIP_MASK_ALL,
2108 QM_REG_CONNNUM_0, 4, 0x000fffff },
2109 { BNX2X_CHIP_MASK_ALL,
2110 TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
2111 { BNX2X_CHIP_MASK_ALL,
2112 SRC_REG_KEYRSS0_0, 40, 0xffffffff },
2113 { BNX2X_CHIP_MASK_ALL,
2114 SRC_REG_KEYRSS0_7, 40, 0xffffffff },
2115 { BNX2X_CHIP_MASK_ALL,
2116 XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
2117 { BNX2X_CHIP_MASK_ALL,
2118 XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
2119 { BNX2X_CHIP_MASK_ALL,
2120 XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
2121 { BNX2X_CHIP_MASK_ALL,
2122 NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
2123/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2124 NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
2125 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2126 NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
2127 { BNX2X_CHIP_MASK_ALL,
2128 NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
2129 { BNX2X_CHIP_MASK_ALL,
2130 NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
2131 { BNX2X_CHIP_MASK_ALL,
2132 NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
2133 { BNX2X_CHIP_MASK_ALL,
2134 NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
2135 { BNX2X_CHIP_MASK_ALL,
2136 NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
2137 { BNX2X_CHIP_MASK_ALL,
2138 NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
2139 { BNX2X_CHIP_MASK_ALL,
2140 NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
2141 { BNX2X_CHIP_MASK_ALL,
2142 NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
2143/* 30 */ { BNX2X_CHIP_MASK_ALL,
2144 NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
2145 { BNX2X_CHIP_MASK_ALL,
2146 NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
2147 { BNX2X_CHIP_MASK_ALL,
2148 NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
2149 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2150 NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
2151 { BNX2X_CHIP_MASK_ALL,
2152 NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
2153 { BNX2X_CHIP_MASK_ALL,
2154 NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
2155 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2156 NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
2157 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2158 NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
2159
2160 { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
de0c62db
DK
2161 };
2162
51c1a580
MS
2163 if (!netif_running(bp->dev)) {
2164 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2165 "cannot access eeprom when the interface is down\n");
de0c62db 2166 return rc;
51c1a580 2167 }
de0c62db 2168
619c5cb6
VZ
2169 if (CHIP_IS_E1(bp))
2170 hw = BNX2X_CHIP_MASK_E1;
2171 else if (CHIP_IS_E1H(bp))
2172 hw = BNX2X_CHIP_MASK_E1H;
2173 else if (CHIP_IS_E2(bp))
2174 hw = BNX2X_CHIP_MASK_E2;
2175 else if (CHIP_IS_E3B0(bp))
2176 hw = BNX2X_CHIP_MASK_E3B0;
2177 else /* e3 A0 */
2178 hw = BNX2X_CHIP_MASK_E3;
2179
de0c62db 2180 /* Repeat the test twice:
07ba6af4
MS
2181 * First by writing 0x00000000, second by writing 0xffffffff
2182 */
de0c62db
DK
2183 for (idx = 0; idx < 2; idx++) {
2184
2185 switch (idx) {
2186 case 0:
2187 wr_val = 0;
2188 break;
2189 case 1:
2190 wr_val = 0xffffffff;
2191 break;
2192 }
2193
2194 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
2195 u32 offset, mask, save_val, val;
619c5cb6 2196 if (!(hw & reg_tbl[i].hw))
f2e0899f 2197 continue;
de0c62db
DK
2198
2199 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
2200 mask = reg_tbl[i].mask;
2201
2202 save_val = REG_RD(bp, offset);
2203
ec6ba945 2204 REG_WR(bp, offset, wr_val & mask);
f85582f8 2205
de0c62db
DK
2206 val = REG_RD(bp, offset);
2207
2208 /* Restore the original register's value */
2209 REG_WR(bp, offset, save_val);
2210
2211 /* verify value is as expected */
2212 if ((val & mask) != (wr_val & mask)) {
51c1a580 2213 DP(BNX2X_MSG_ETHTOOL,
de0c62db
DK
2214 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
2215 offset, val, wr_val, mask);
2216 goto test_reg_exit;
2217 }
2218 }
2219 }
2220
2221 rc = 0;
2222
2223test_reg_exit:
2224 return rc;
2225}
2226
2227static int bnx2x_test_memory(struct bnx2x *bp)
2228{
2229 int i, j, rc = -ENODEV;
619c5cb6 2230 u32 val, index;
de0c62db
DK
2231 static const struct {
2232 u32 offset;
2233 int size;
2234 } mem_tbl[] = {
2235 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
2236 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
2237 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
2238 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
2239 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
2240 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
2241 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
2242
2243 { 0xffffffff, 0 }
2244 };
619c5cb6 2245
de0c62db
DK
2246 static const struct {
2247 char *name;
2248 u32 offset;
619c5cb6 2249 u32 hw_mask[BNX2X_CHIP_MAX_OFST];
de0c62db 2250 } prty_tbl[] = {
619c5cb6
VZ
2251 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
2252 {0x3ffc0, 0, 0, 0} },
2253 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
2254 {0x2, 0x2, 0, 0} },
2255 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
2256 {0, 0, 0, 0} },
2257 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
2258 {0x3ffc0, 0, 0, 0} },
2259 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
2260 {0x3ffc0, 0, 0, 0} },
2261 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
2262 {0x3ffc1, 0, 0, 0} },
2263
2264 { NULL, 0xffffffff, {0, 0, 0, 0} }
de0c62db
DK
2265 };
2266
51c1a580
MS
2267 if (!netif_running(bp->dev)) {
2268 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2269 "cannot access eeprom when the interface is down\n");
de0c62db 2270 return rc;
51c1a580 2271 }
de0c62db 2272
619c5cb6
VZ
2273 if (CHIP_IS_E1(bp))
2274 index = BNX2X_CHIP_E1_OFST;
2275 else if (CHIP_IS_E1H(bp))
2276 index = BNX2X_CHIP_E1H_OFST;
2277 else if (CHIP_IS_E2(bp))
2278 index = BNX2X_CHIP_E2_OFST;
2279 else /* e3 */
2280 index = BNX2X_CHIP_E3_OFST;
2281
f2e0899f
DK
2282 /* Pre-check the parity status */
2283 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
2284 val = REG_RD(bp, prty_tbl[i].offset);
619c5cb6 2285 if (val & ~(prty_tbl[i].hw_mask[index])) {
51c1a580 2286 DP(BNX2X_MSG_ETHTOOL,
f2e0899f
DK
2287 "%s is 0x%x\n", prty_tbl[i].name, val);
2288 goto test_mem_exit;
2289 }
2290 }
2291
de0c62db
DK
2292 /* Go through all the memories */
2293 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
2294 for (j = 0; j < mem_tbl[i].size; j++)
2295 REG_RD(bp, mem_tbl[i].offset + j*4);
2296
2297 /* Check the parity status */
2298 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
2299 val = REG_RD(bp, prty_tbl[i].offset);
619c5cb6 2300 if (val & ~(prty_tbl[i].hw_mask[index])) {
51c1a580 2301 DP(BNX2X_MSG_ETHTOOL,
de0c62db
DK
2302 "%s is 0x%x\n", prty_tbl[i].name, val);
2303 goto test_mem_exit;
2304 }
2305 }
2306
2307 rc = 0;
2308
2309test_mem_exit:
2310 return rc;
2311}
2312
a22f0788 2313static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
de0c62db 2314{
f2e0899f 2315 int cnt = 1400;
de0c62db 2316
619c5cb6 2317 if (link_up) {
a22f0788 2318 while (bnx2x_link_test(bp, is_serdes) && cnt--)
619c5cb6
VZ
2319 msleep(20);
2320
2321 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
51c1a580 2322 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
8970b2e4
MS
2323
2324 cnt = 1400;
2325 while (!bp->link_vars.link_up && cnt--)
2326 msleep(20);
2327
2328 if (cnt <= 0 && !bp->link_vars.link_up)
2329 DP(BNX2X_MSG_ETHTOOL,
2330 "Timeout waiting for link init\n");
619c5cb6 2331 }
de0c62db
DK
2332}
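/* Note: each polling loop above retries every 20 ms for up to 1400
 * iterations, i.e. roughly 28 seconds, before giving up with a debug
 * message; the caller then proceeds with whatever link state it got.
 */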
2333
619c5cb6 2334static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
de0c62db
DK
2335{
2336 unsigned int pkt_size, num_pkts, i;
2337 struct sk_buff *skb;
2338 unsigned char *packet;
2339 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
2340 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
65565884 2341 struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
de0c62db
DK
2342 u16 tx_start_idx, tx_idx;
2343 u16 rx_start_idx, rx_idx;
b0700b1e 2344 u16 pkt_prod, bd_prod;
de0c62db
DK
2345 struct sw_tx_bd *tx_buf;
2346 struct eth_tx_start_bd *tx_start_bd;
de0c62db
DK
2347 dma_addr_t mapping;
2348 union eth_rx_cqe *cqe;
619c5cb6 2349 u8 cqe_fp_flags, cqe_fp_type;
de0c62db
DK
2350 struct sw_rx_bd *rx_buf;
2351 u16 len;
2352 int rc = -ENODEV;
e52fcb24 2353 u8 *data;
8970b2e4
MS
2354 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
2355 txdata->txq_index);
de0c62db
DK
2356
2357 /* check the loopback mode */
2358 switch (loopback_mode) {
2359 case BNX2X_PHY_LOOPBACK:
8970b2e4
MS
2360 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
2361 DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
de0c62db 2362 return -EINVAL;
8970b2e4 2363 }
de0c62db
DK
2364 break;
2365 case BNX2X_MAC_LOOPBACK:
32911333
YR
2366 if (CHIP_IS_E3(bp)) {
2367 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
2368 if (bp->port.supported[cfg_idx] &
2369 (SUPPORTED_10000baseT_Full |
2370 SUPPORTED_20000baseMLD2_Full |
2371 SUPPORTED_20000baseKR2_Full))
2372 bp->link_params.loopback_mode = LOOPBACK_XMAC;
2373 else
2374 bp->link_params.loopback_mode = LOOPBACK_UMAC;
2375 } else
2376 bp->link_params.loopback_mode = LOOPBACK_BMAC;
2377
de0c62db
DK
2378 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2379 break;
8970b2e4
MS
2380 case BNX2X_EXT_LOOPBACK:
2381 if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
2382 DP(BNX2X_MSG_ETHTOOL,
2383 "Can't configure external loopback\n");
2384 return -EINVAL;
2385 }
2386 break;
de0c62db 2387 default:
51c1a580 2388 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
de0c62db
DK
2389 return -EINVAL;
2390 }
2391
2392 /* prepare the loopback packet */
2393 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
2394 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
a8c94b91 2395 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
de0c62db 2396 if (!skb) {
51c1a580 2397 DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
de0c62db
DK
2398 rc = -ENOMEM;
2399 goto test_loopback_exit;
2400 }
2401 packet = skb_put(skb, pkt_size);
2402 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
2403 memset(packet + ETH_ALEN, 0, ETH_ALEN);
2404 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
2405 for (i = ETH_HLEN; i < pkt_size; i++)
2406 packet[i] = (unsigned char) (i & 0xff);
619c5cb6
VZ
2407 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2408 skb_headlen(skb), DMA_TO_DEVICE);
2409 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2410 rc = -ENOMEM;
2411 dev_kfree_skb(skb);
51c1a580 2412 DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
619c5cb6
VZ
2413 goto test_loopback_exit;
2414 }
de0c62db
DK
2415
2416 /* send the loopback packet */
2417 num_pkts = 0;
6383c0b3 2418 tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
de0c62db
DK
2419 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
2420
73dbb5e1
DK
2421 netdev_tx_sent_queue(txq, skb->len);
2422
6383c0b3
AE
2423 pkt_prod = txdata->tx_pkt_prod++;
2424 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2425 tx_buf->first_bd = txdata->tx_bd_prod;
de0c62db
DK
2426 tx_buf->skb = skb;
2427 tx_buf->flags = 0;
2428
6383c0b3
AE
2429 bd_prod = TX_BD(txdata->tx_bd_prod);
2430 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
de0c62db
DK
2431 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2432 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2433 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
2434 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
523224a3 2435 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
de0c62db 2436 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
523224a3
DK
2437 SET_FLAG(tx_start_bd->general_data,
2438 ETH_TX_START_BD_HDR_NBDS,
2439 1);
96bed4b9
YM
2440 SET_FLAG(tx_start_bd->general_data,
2441 ETH_TX_START_BD_PARSE_NBDS,
2442 0);
de0c62db
DK
2443
2444 /* turn on parsing and get a BD */
2445 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
f85582f8 2446
96bed4b9
YM
2447 if (CHIP_IS_E1x(bp)) {
2448 u16 global_data = 0;
2449 struct eth_tx_parse_bd_e1x *pbd_e1x =
2450 &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2451 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2452 SET_FLAG(global_data,
2453 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2454 pbd_e1x->global_data = cpu_to_le16(global_data);
2455 } else {
2456 u32 parsing_data = 0;
2457 struct eth_tx_parse_bd_e2 *pbd_e2 =
2458 &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2459 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2460 SET_FLAG(parsing_data,
2461 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2462 pbd_e2->parsing_data = cpu_to_le32(parsing_data);
2463 }
de0c62db
DK
2464 wmb();
2465
6383c0b3 2466 txdata->tx_db.data.prod += 2;
de0c62db 2467 barrier();
6383c0b3 2468 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
de0c62db
DK
2469
2470 mmiowb();
619c5cb6 2471 barrier();
de0c62db
DK
2472
2473 num_pkts++;
6383c0b3 2474 txdata->tx_bd_prod += 2; /* start + pbd */
de0c62db
DK
2475
2476 udelay(100);
2477
6383c0b3 2478 tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
de0c62db
DK
2479 if (tx_idx != tx_start_idx + num_pkts)
2480 goto test_loopback_exit;
2481
f2e0899f
DK
2482 /* Unlike the HC, the IGU won't generate an interrupt for status block
2483 * updates that have been performed while interrupts were
2484 * disabled.
2485 */
e1210d12
ED
2486 if (bp->common.int_block == INT_BLOCK_IGU) {
2487 /* Disable local BHs to prevent a deadlock between
2488 * sch_direct_xmit() and bnx2x_run_loopback() (calling
2489 * bnx2x_tx_int()), as both are taking netif_tx_lock().
2490 */
2491 local_bh_disable();
6383c0b3 2492 bnx2x_tx_int(bp, txdata);
e1210d12
ED
2493 local_bh_enable();
2494 }
f2e0899f 2495
de0c62db
DK
2496 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
2497 if (rx_idx != rx_start_idx + num_pkts)
2498 goto test_loopback_exit;
2499
b0700b1e 2500 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
de0c62db 2501 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
619c5cb6
VZ
2502 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
2503 if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
de0c62db
DK
2504 goto test_loopback_rx_exit;
2505
621b4d66 2506 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
de0c62db
DK
2507 if (len != pkt_size)
2508 goto test_loopback_rx_exit;
2509
2510 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9924cafc 2511 dma_sync_single_for_cpu(&bp->pdev->dev,
619c5cb6
VZ
2512 dma_unmap_addr(rx_buf, mapping),
2513 fp_rx->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 2514 data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
de0c62db 2515 for (i = ETH_HLEN; i < pkt_size; i++)
e52fcb24 2516 if (*(data + i) != (unsigned char) (i & 0xff))
de0c62db
DK
2517 goto test_loopback_rx_exit;
2518
2519 rc = 0;
2520
2521test_loopback_rx_exit:
2522
2523 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
2524 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
2525 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
2526 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
2527
2528 /* Update producers */
2529 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
2530 fp_rx->rx_sge_prod);
2531
2532test_loopback_exit:
2533 bp->link_params.loopback_mode = LOOPBACK_NONE;
2534
2535 return rc;
2536}
2537
619c5cb6 2538static int bnx2x_test_loopback(struct bnx2x *bp)
de0c62db
DK
2539{
2540 int rc = 0, res;
2541
2542 if (BP_NOMCP(bp))
2543 return rc;
2544
2545 if (!netif_running(bp->dev))
2546 return BNX2X_LOOPBACK_FAILED;
2547
2548 bnx2x_netif_stop(bp, 1);
2549 bnx2x_acquire_phy_lock(bp);
2550
619c5cb6 2551 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
de0c62db 2552 if (res) {
51c1a580 2553 DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
de0c62db
DK
2554 rc |= BNX2X_PHY_LOOPBACK_FAILED;
2555 }
2556
619c5cb6 2557 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
de0c62db 2558 if (res) {
51c1a580 2559 DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
de0c62db
DK
2560 rc |= BNX2X_MAC_LOOPBACK_FAILED;
2561 }
2562
2563 bnx2x_release_phy_lock(bp);
2564 bnx2x_netif_start(bp);
2565
2566 return rc;
2567}
2568
8970b2e4
MS
2569static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2570{
2571 int rc;
2572 u8 is_serdes =
2573 (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2574
2575 if (BP_NOMCP(bp))
2576 return -ENODEV;
2577
2578 if (!netif_running(bp->dev))
2579 return BNX2X_EXT_LOOPBACK_FAILED;
2580
5d07d868 2581 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
8970b2e4
MS
2582 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
2583 if (rc) {
2584 DP(BNX2X_MSG_ETHTOOL,
2585 "Can't perform self-test, nic_load (for external lb) failed\n");
2586 return -ENODEV;
2587 }
2588 bnx2x_wait_for_link(bp, 1, is_serdes);
2589
2590 bnx2x_netif_stop(bp, 1);
2591
2592 rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
2593 if (rc)
2594 DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
2595
2596 bnx2x_netif_start(bp);
2597
2598 return rc;
2599}
2600
edb944d2
DK
2601struct code_entry {
2602 u32 sram_start_addr;
2603 u32 code_attribute;
2604#define CODE_IMAGE_TYPE_MASK 0xf0800003
2605#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
2606#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
2607#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
2608 u32 nvm_start_addr;
2609};
2610
2611#define CODE_ENTRY_MAX 16
2612#define CODE_ENTRY_EXTENDED_DIR_IDX 15
2613#define MAX_IMAGES_IN_EXTENDED_DIR 64
2614#define NVRAM_DIR_OFFSET 0x14
2615
2616#define EXTENDED_DIR_EXISTS(code) \
2617 ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
2618 (code & CODE_IMAGE_LENGTH_MASK) != 0)
2619
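/* CRC32_RESIDUAL is the constant remainder that crc32_le() (seeded with ~0
 * and with no final inversion, exactly as used in bnx2x_nvram_crc() below)
 * leaves when it runs over an NVRAM region that ends with that region's
 * stored CRC.  Any corruption of the data or of the stored CRC changes the
 * remainder, so a single compare against this constant validates a region.
 */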
de0c62db 2620#define CRC32_RESIDUAL 0xdebb20e3
edb944d2
DK
2621#define CRC_BUFF_SIZE 256
2622
2623static int bnx2x_nvram_crc(struct bnx2x *bp,
2624 int offset,
2625 int size,
2626 u8 *buff)
2627{
2628 u32 crc = ~0;
2629 int rc = 0, done = 0;
2630
2631 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2632 "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
2633
2634 while (done < size) {
2635 int count = min_t(int, size - done, CRC_BUFF_SIZE);
2636
2637 rc = bnx2x_nvram_read(bp, offset + done, buff, count);
2638
2639 if (rc)
2640 return rc;
2641
2642 crc = crc32_le(crc, buff, count);
2643 done += count;
2644 }
2645
2646 if (crc != CRC32_RESIDUAL)
2647 rc = -EINVAL;
2648
2649 return rc;
2650}
2651
2652static int bnx2x_test_nvram_dir(struct bnx2x *bp,
2653 struct code_entry *entry,
2654 u8 *buff)
2655{
2656 size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
2657 u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
2658 int rc;
2659
2660 /* Zero-length images and AFEX profiles do not have CRC */
2661 if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
2662 return 0;
2663
2664 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
2665 if (rc)
2666 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2667 "image %x has failed crc test (rc %d)\n", type, rc);
2668
2669 return rc;
2670}
2671
2672static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
2673{
2674 int rc;
2675 struct code_entry entry;
2676
2677 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
2678 if (rc)
2679 return rc;
2680
2681 return bnx2x_test_nvram_dir(bp, &entry, buff);
2682}
2683
2684static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
2685{
2686 u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
2687 struct code_entry entry;
2688 int i;
2689
2690 rc = bnx2x_nvram_read32(bp,
2691 dir_offset +
2692 sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
2693 (u32 *)&entry, sizeof(entry));
2694 if (rc)
2695 return rc;
2696
2697 if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
2698 return 0;
2699
2700 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
2701 &cnt, sizeof(u32));
2702 if (rc)
2703 return rc;
2704
2705 dir_offset = entry.nvm_start_addr + 8;
2706
2707 for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
2708 rc = bnx2x_test_dir_entry(bp, dir_offset +
2709 sizeof(struct code_entry) * i,
2710 buff);
2711 if (rc)
2712 return rc;
2713 }
2714
2715 return 0;
2716}
2717
2718static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
2719{
2720 u32 rc, dir_offset = NVRAM_DIR_OFFSET;
2721 int i;
2722
2723 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
2724
2725 for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
2726 rc = bnx2x_test_dir_entry(bp, dir_offset +
2727 sizeof(struct code_entry) * i,
2728 buff);
2729 if (rc)
2730 return rc;
2731 }
2732
2733 return bnx2x_test_nvram_ext_dirs(bp, buff);
2734}
2735
2736struct crc_pair {
2737 int offset;
2738 int size;
2739};
2740
2741static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
2742 const struct crc_pair *nvram_tbl, u8 *buf)
2743{
2744 int i;
2745
2746 for (i = 0; nvram_tbl[i].size; i++) {
2747 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
2748 nvram_tbl[i].size, buf);
2749 if (rc) {
2750 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2751 "nvram_tbl[%d] has failed crc test (rc %d)\n",
2752 i, rc);
2753 return rc;
2754 }
2755 }
2756
2757 return 0;
2758}
de0c62db
DK
2759
2760static int bnx2x_test_nvram(struct bnx2x *bp)
2761{
edb944d2 2762 const struct crc_pair nvram_tbl[] = {
de0c62db
DK
2763 { 0, 0x14 }, /* bootstrap */
2764 { 0x14, 0xec }, /* dir */
2765 { 0x100, 0x350 }, /* manuf_info */
2766 { 0x450, 0xf0 }, /* feature_info */
2767 { 0x640, 0x64 }, /* upgrade_key_info */
de0c62db 2768 { 0x708, 0x70 }, /* manuf_key_info */
de0c62db
DK
2769 { 0, 0 }
2770 };
edb944d2
DK
2771 const struct crc_pair nvram_tbl2[] = {
2772 { 0x7e8, 0x350 }, /* manuf_info2 */
2773 { 0xb38, 0xf0 }, /* feature_info */
2774 { 0, 0 }
2775 };
2776
85640952 2777 u8 *buf;
edb944d2
DK
2778 int rc;
2779 u32 magic;
de0c62db
DK
2780
2781 if (BP_NOMCP(bp))
2782 return 0;
2783
edb944d2 2784 buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
afa13b4b 2785 if (!buf) {
51c1a580 2786 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
afa13b4b
MY
2787 rc = -ENOMEM;
2788 goto test_nvram_exit;
2789 }
afa13b4b 2790
85640952 2791 rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
de0c62db 2792 if (rc) {
51c1a580
MS
2793 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2794 "magic value read (rc %d)\n", rc);
de0c62db
DK
2795 goto test_nvram_exit;
2796 }
2797
de0c62db 2798 if (magic != 0x669955aa) {
51c1a580
MS
2799 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2800 "wrong magic value (0x%08x)\n", magic);
de0c62db
DK
2801 rc = -ENODEV;
2802 goto test_nvram_exit;
2803 }
2804
edb944d2
DK
2805 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
2806 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
2807 if (rc)
2808 goto test_nvram_exit;
de0c62db 2809
edb944d2
DK
2810 if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
2811 u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
2812 SHARED_HW_CFG_HIDE_PORT1;
de0c62db 2813
edb944d2 2814 if (!hide) {
51c1a580 2815 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
edb944d2
DK
2816 "Port 1 CRC test-set\n");
2817 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
2818 if (rc)
2819 goto test_nvram_exit;
de0c62db
DK
2820 }
2821 }
2822
edb944d2
DK
2823 rc = bnx2x_test_nvram_dirs(bp, buf);
2824
de0c62db 2825test_nvram_exit:
afa13b4b 2826 kfree(buf);
de0c62db
DK
2827 return rc;
2828}
2829
619c5cb6 2830/* Send an EMPTY ramrod on the first queue */
de0c62db
DK
2831static int bnx2x_test_intr(struct bnx2x *bp)
2832{
3b603066 2833 struct bnx2x_queue_state_params params = {NULL};
de0c62db 2834
51c1a580
MS
2835 if (!netif_running(bp->dev)) {
2836 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2837 "cannot access eeprom when the interface is down\n");
de0c62db 2838 return -ENODEV;
51c1a580 2839 }
de0c62db 2840
15192a8c 2841 params.q_obj = &bp->sp_objs->q_obj;
619c5cb6 2842 params.cmd = BNX2X_Q_CMD_EMPTY;
de0c62db 2843
619c5cb6
VZ
2844 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2845
2846 return bnx2x_queue_state_change(bp, &params);
de0c62db
DK
2847}
2848
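/* Result layout note: the buf[] slots filled below follow
 * bnx2x_tests_str_arr - buf[0..3] for the offline register/memory/
 * internal-loopback/external-loopback tests and buf[4..6] for the online
 * nvram/interrupt/link tests.  In multi-function mode the offline tests
 * are skipped and the online results are reported in buf[0..2] to match
 * the shortened string table returned by bnx2x_get_strings().
 */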
2849static void bnx2x_self_test(struct net_device *dev,
2850 struct ethtool_test *etest, u64 *buf)
2851{
2852 struct bnx2x *bp = netdev_priv(dev);
a336ca7c
YR
2853 u8 is_serdes, link_up;
2854 int rc, cnt = 0;
cf2c1df6 2855
de0c62db 2856 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580
MS
2857 netdev_err(bp->dev,
2858 "Handling parity error recovery. Try again later\n");
de0c62db
DK
2859 etest->flags |= ETH_TEST_FL_FAILED;
2860 return;
2861 }
2de67439 2862
8970b2e4
MS
2863 DP(BNX2X_MSG_ETHTOOL,
2864 "Self-test command parameters: offline = %d, external_lb = %d\n",
2865 (etest->flags & ETH_TEST_FL_OFFLINE),
2866 (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
de0c62db 2867
cf2c1df6 2868 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
de0c62db 2869
cf2c1df6 2870 if (!netif_running(dev)) {
97cd1ee6
DK
2871 DP(BNX2X_MSG_ETHTOOL,
2872 "Can't perform self-test when interface is down\n");
de0c62db 2873 return;
cf2c1df6 2874 }
de0c62db 2875
a22f0788 2876 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
a336ca7c 2877 link_up = bp->link_vars.link_up;
cf2c1df6
MS
2878 /* offline tests are not supported in MF mode */
2879 if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
de0c62db
DK
2880 int port = BP_PORT(bp);
2881 u32 val;
de0c62db
DK
2882
2883 /* save current value of input enable for TX port IF */
2884 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
2885 /* disable input for TX port IF */
2886 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
2887
5d07d868 2888 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
cf2c1df6
MS
2889 rc = bnx2x_nic_load(bp, LOAD_DIAG);
2890 if (rc) {
2891 etest->flags |= ETH_TEST_FL_FAILED;
2892 DP(BNX2X_MSG_ETHTOOL,
2893 "Can't perform self-test, nic_load (for offline) failed\n");
2894 return;
2895 }
2896
de0c62db 2897 /* wait until link state is restored */
619c5cb6 2898 bnx2x_wait_for_link(bp, 1, is_serdes);
de0c62db
DK
2899
2900 if (bnx2x_test_registers(bp) != 0) {
2901 buf[0] = 1;
2902 etest->flags |= ETH_TEST_FL_FAILED;
2903 }
2904 if (bnx2x_test_memory(bp) != 0) {
2905 buf[1] = 1;
2906 etest->flags |= ETH_TEST_FL_FAILED;
2907 }
f85582f8 2908
8970b2e4 2909 buf[2] = bnx2x_test_loopback(bp); /* internal LB */
de0c62db
DK
2910 if (buf[2] != 0)
2911 etest->flags |= ETH_TEST_FL_FAILED;
2912
8970b2e4
MS
2913 if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
2914 buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
2915 if (buf[3] != 0)
2916 etest->flags |= ETH_TEST_FL_FAILED;
2917 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2918 }
2919
5d07d868 2920 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
de0c62db
DK
2921
2922 /* restore input for TX port IF */
2923 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
cf2c1df6
MS
2924 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2925 if (rc) {
2926 etest->flags |= ETH_TEST_FL_FAILED;
2927 DP(BNX2X_MSG_ETHTOOL,
2928 "Can't perform self-test, nic_load (for online) failed\n");
2929 return;
2930 }
de0c62db 2931 /* wait until link state is restored */
a22f0788 2932 bnx2x_wait_for_link(bp, link_up, is_serdes);
de0c62db 2933 }
97cd1ee6
DK
2934 if (bnx2x_test_nvram(bp) != 0) {
2935 if (!IS_MF(bp))
2936 buf[4] = 1;
2937 else
2938 buf[0] = 1;
2939 etest->flags |= ETH_TEST_FL_FAILED;
2940 }
de0c62db 2941 if (bnx2x_test_intr(bp) != 0) {
cf2c1df6
MS
2942 if (!IS_MF(bp))
2943 buf[5] = 1;
2944 else
2945 buf[1] = 1;
de0c62db
DK
2946 etest->flags |= ETH_TEST_FL_FAILED;
2947 }
633ac363 2948
a336ca7c
YR
2949 if (link_up) {
2950 cnt = 100;
2951 while (bnx2x_link_test(bp, is_serdes) && --cnt)
2952 msleep(20);
2953 }
2954
2955 if (!cnt) {
cf2c1df6
MS
2956 if (!IS_MF(bp))
2957 buf[6] = 1;
2958 else
2959 buf[2] = 1;
633ac363
DK
2960 etest->flags |= ETH_TEST_FL_FAILED;
2961 }
de0c62db
DK
2962}
2963
de0c62db
DK
2964#define IS_PORT_STAT(i) \
2965 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
2966#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
fb3bff17
DK
2967#define IS_MF_MODE_STAT(bp) \
2968 (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
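/* In multi-function mode the per-port MAC statistics are not meaningful for
 * an individual function, so unless the BNX2X_MSG_STATS debug flag is set
 * only function-level counters are exposed; the sset_count/strings/stats
 * handlers below all use these macros to stay consistent with each other.
 */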
de0c62db 2969
619c5cb6
VZ
2970/* ethtool statistics are displayed for all regular ethernet queues and the
2971 * fcoe L2 queue if not disabled
2972 */
1191cb83 2973static int bnx2x_num_stat_queues(struct bnx2x *bp)
619c5cb6
VZ
2974{
2975 return BNX2X_NUM_ETH_QUEUES(bp);
2976}
2977
de0c62db
DK
2978static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2979{
2980 struct bnx2x *bp = netdev_priv(dev);
2981 int i, num_stats;
2982
2983 switch (stringset) {
2984 case ETH_SS_STATS:
2985 if (is_multi(bp)) {
619c5cb6 2986 num_stats = bnx2x_num_stat_queues(bp) *
d5e83632
YM
2987 BNX2X_NUM_Q_STATS;
2988 } else
2989 num_stats = 0;
2990 if (IS_MF_MODE_STAT(bp)) {
2991 for (i = 0; i < BNX2X_NUM_STATS; i++)
2992 if (IS_FUNC_STAT(i))
2993 num_stats++;
2994 } else
2995 num_stats += BNX2X_NUM_STATS;
2996
de0c62db
DK
2997 return num_stats;
2998
2999 case ETH_SS_TEST:
cf2c1df6 3000 return BNX2X_NUM_TESTS(bp);
de0c62db
DK
3001
3002 default:
3003 return -EINVAL;
3004 }
3005}
3006
3007static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3008{
3009 struct bnx2x *bp = netdev_priv(dev);
5889335c 3010 int i, j, k, start;
ec6ba945 3011 char queue_name[MAX_QUEUE_NAME_LEN+1];
de0c62db
DK
3012
3013 switch (stringset) {
3014 case ETH_SS_STATS:
d5e83632 3015 k = 0;
de0c62db 3016 if (is_multi(bp)) {
619c5cb6 3017 for_each_eth_queue(bp, i) {
ec6ba945 3018 memset(queue_name, 0, sizeof(queue_name));
619c5cb6 3019 sprintf(queue_name, "%d", i);
de0c62db 3020 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
ec6ba945
VZ
3021 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
3022 ETH_GSTRING_LEN,
3023 bnx2x_q_stats_arr[j].string,
3024 queue_name);
de0c62db
DK
3025 k += BNX2X_NUM_Q_STATS;
3026 }
de0c62db 3027 }
d5e83632
YM
3028
3029
3030 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3031 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
3032 continue;
3033 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
3034 bnx2x_stats_arr[i].string);
3035 j++;
3036 }
3037
de0c62db
DK
3038 break;
3039
3040 case ETH_SS_TEST:
cf2c1df6
MS
3041 /* First 4 tests cannot be done in MF mode */
3042 if (!IS_MF(bp))
3043 start = 0;
3044 else
3045 start = 4;
5889335c
MS
3046 memcpy(buf, bnx2x_tests_str_arr + start,
3047 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
de0c62db
DK
3048 }
3049}
3050
3051static void bnx2x_get_ethtool_stats(struct net_device *dev,
3052 struct ethtool_stats *stats, u64 *buf)
3053{
3054 struct bnx2x *bp = netdev_priv(dev);
3055 u32 *hw_stats, *offset;
d5e83632 3056 int i, j, k = 0;
de0c62db
DK
3057
3058 if (is_multi(bp)) {
619c5cb6 3059 for_each_eth_queue(bp, i) {
15192a8c 3060 hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
de0c62db
DK
3061 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
3062 if (bnx2x_q_stats_arr[j].size == 0) {
3063 /* skip this counter */
3064 buf[k + j] = 0;
3065 continue;
3066 }
3067 offset = (hw_stats +
3068 bnx2x_q_stats_arr[j].offset);
3069 if (bnx2x_q_stats_arr[j].size == 4) {
3070 /* 4-byte counter */
3071 buf[k + j] = (u64) *offset;
3072 continue;
3073 }
3074 /* 8-byte counter */
3075 buf[k + j] = HILO_U64(*offset, *(offset + 1));
3076 }
3077 k += BNX2X_NUM_Q_STATS;
3078 }
d5e83632
YM
3079 }
3080
3081 hw_stats = (u32 *)&bp->eth_stats;
3082 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3083 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
3084 continue;
3085 if (bnx2x_stats_arr[i].size == 0) {
3086 /* skip this counter */
3087 buf[k + j] = 0;
3088 j++;
3089 continue;
de0c62db 3090 }
d5e83632
YM
3091 offset = (hw_stats + bnx2x_stats_arr[i].offset);
3092 if (bnx2x_stats_arr[i].size == 4) {
3093 /* 4-byte counter */
3094 buf[k + j] = (u64) *offset;
de0c62db 3095 j++;
d5e83632 3096 continue;
de0c62db 3097 }
d5e83632
YM
3098 /* 8-byte counter */
3099 buf[k + j] = HILO_U64(*offset, *(offset + 1));
3100 j++;
de0c62db
DK
3101 }
3102}
3103
32d36134 3104static int bnx2x_set_phys_id(struct net_device *dev,
3105 enum ethtool_phys_id_state state)
de0c62db
DK
3106{
3107 struct bnx2x *bp = netdev_priv(dev);
de0c62db 3108
51c1a580
MS
3109 if (!netif_running(dev)) {
3110 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
3111 "cannot access eeprom when the interface is down\n");
32d36134 3112 return -EAGAIN;
51c1a580 3113 }
de0c62db 3114
51c1a580
MS
3115 if (!bp->port.pmf) {
3116 DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
32d36134 3117 return -EOPNOTSUPP;
51c1a580 3118 }
de0c62db 3119
32d36134 3120 switch (state) {
3121 case ETHTOOL_ID_ACTIVE:
fce55922 3122 return 1; /* cycle on/off once per second */
de0c62db 3123
32d36134 3124 case ETHTOOL_ID_ON:
8203c4b6 3125 bnx2x_acquire_phy_lock(bp);
32d36134 3126 bnx2x_set_led(&bp->link_params, &bp->link_vars,
e1943424 3127 LED_MODE_ON, SPEED_1000);
8203c4b6 3128 bnx2x_release_phy_lock(bp);
32d36134 3129 break;
de0c62db 3130
32d36134 3131 case ETHTOOL_ID_OFF:
8203c4b6 3132 bnx2x_acquire_phy_lock(bp);
32d36134 3133 bnx2x_set_led(&bp->link_params, &bp->link_vars,
e1943424 3134 LED_MODE_FRONT_PANEL_OFF, 0);
8203c4b6 3135 bnx2x_release_phy_lock(bp);
32d36134 3136 break;
3137
3138 case ETHTOOL_ID_INACTIVE:
8203c4b6 3139 bnx2x_acquire_phy_lock(bp);
e1943424
DM
3140 bnx2x_set_led(&bp->link_params, &bp->link_vars,
3141 LED_MODE_OPER,
3142 bp->link_vars.line_speed);
8203c4b6 3143 bnx2x_release_phy_lock(bp);
32d36134 3144 }
de0c62db
DK
3145
3146 return 0;
3147}
3148
5d317c6a
MS
3149static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3150{
3151
3152 switch (info->flow_type) {
3153 case TCP_V4_FLOW:
3154 case TCP_V6_FLOW:
3155 info->data = RXH_IP_SRC | RXH_IP_DST |
3156 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3157 break;
3158 case UDP_V4_FLOW:
3159 if (bp->rss_conf_obj.udp_rss_v4)
3160 info->data = RXH_IP_SRC | RXH_IP_DST |
3161 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3162 else
3163 info->data = RXH_IP_SRC | RXH_IP_DST;
3164 break;
3165 case UDP_V6_FLOW:
3166 if (bp->rss_conf_obj.udp_rss_v6)
3167 info->data = RXH_IP_SRC | RXH_IP_DST |
3168 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3169 else
3170 info->data = RXH_IP_SRC | RXH_IP_DST;
3171 break;
3172 case IPV4_FLOW:
3173 case IPV6_FLOW:
3174 info->data = RXH_IP_SRC | RXH_IP_DST;
3175 break;
3176 default:
3177 info->data = 0;
3178 break;
3179 }
3180
3181 return 0;
3182}
3183
ab532cf3 3184static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 3185 u32 *rules __always_unused)
ab532cf3
TH
3186{
3187 struct bnx2x *bp = netdev_priv(dev);
3188
3189 switch (info->cmd) {
3190 case ETHTOOL_GRXRINGS:
3191 info->data = BNX2X_NUM_ETH_QUEUES(bp);
3192 return 0;
5d317c6a
MS
3193 case ETHTOOL_GRXFH:
3194 return bnx2x_get_rss_flags(bp, info);
3195 default:
3196 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
3197 return -EOPNOTSUPP;
3198 }
3199}
3200
3201static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3202{
3203 int udp_rss_requested;
3204
3205 DP(BNX2X_MSG_ETHTOOL,
3206 "Set rss flags command parameters: flow type = %d, data = %llu\n",
3207 info->flow_type, info->data);
3208
3209 switch (info->flow_type) {
3210 case TCP_V4_FLOW:
3211 case TCP_V6_FLOW:
3212 /* For TCP only 4-tuple hash is supported */
3213 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
3214 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
3215 DP(BNX2X_MSG_ETHTOOL,
3216 "Command parameters not supported\n");
3217 return -EINVAL;
5d317c6a 3218 }
2de67439 3219 return 0;
5d317c6a
MS
3220
3221 case UDP_V4_FLOW:
3222 case UDP_V6_FLOW:
3223 /* For UDP either 2-tuple hash or 4-tuple hash is supported */
3224 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
2de67439 3225 RXH_L4_B_0_1 | RXH_L4_B_2_3))
5d317c6a
MS
3226 udp_rss_requested = 1;
3227 else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
3228 udp_rss_requested = 0;
3229 else
3230 return -EINVAL;
3231 if ((info->flow_type == UDP_V4_FLOW) &&
3232 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
3233 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
3234 DP(BNX2X_MSG_ETHTOOL,
3235 "rss re-configured, UDP 4-tupple %s\n",
3236 udp_rss_requested ? "enabled" : "disabled");
3237 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
3238 } else if ((info->flow_type == UDP_V6_FLOW) &&
3239 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
3240 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
5d317c6a
MS
3241 DP(BNX2X_MSG_ETHTOOL,
3242 "rss re-configured, UDP 4-tupple %s\n",
3243 udp_rss_requested ? "enabled" : "disabled");
337da3e3 3244 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
5d317c6a 3245 }
924d75ab
YM
3246 return 0;
3247
5d317c6a
MS
3248 case IPV4_FLOW:
3249 case IPV6_FLOW:
3250 /* For IP only 2-tuple hash is supported */
3251 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
3252 DP(BNX2X_MSG_ETHTOOL,
3253 "Command parameters not supported\n");
3254 return -EINVAL;
5d317c6a 3255 }
924d75ab
YM
3256 return 0;
3257
5d317c6a
MS
3258 case SCTP_V4_FLOW:
3259 case AH_ESP_V4_FLOW:
3260 case AH_V4_FLOW:
3261 case ESP_V4_FLOW:
3262 case SCTP_V6_FLOW:
3263 case AH_ESP_V6_FLOW:
3264 case AH_V6_FLOW:
3265 case ESP_V6_FLOW:
3266 case IP_USER_FLOW:
3267 case ETHER_FLOW:
3268 /* RSS is not supported for these protocols */
3269 if (info->data) {
3270 DP(BNX2X_MSG_ETHTOOL,
3271 "Command parameters not supported\n");
3272 return -EINVAL;
5d317c6a 3273 }
924d75ab
YM
3274 return 0;
3275
5d317c6a
MS
3276 default:
3277 return -EINVAL;
3278 }
3279}
3280
3281static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3282{
3283 struct bnx2x *bp = netdev_priv(dev);
ab532cf3 3284
5d317c6a
MS
3285 switch (info->cmd) {
3286 case ETHTOOL_SRXFH:
3287 return bnx2x_set_rss_flags(bp, info);
ab532cf3 3288 default:
51c1a580 3289 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
ab532cf3
TH
3290 return -EOPNOTSUPP;
3291 }
3292}
3293
7850f63f
BH
3294static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
3295{
96305234 3296 return T_ETH_INDIRECTION_TABLE_SIZE;
7850f63f
BH
3297}
3298
3299static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
ab532cf3
TH
3300{
3301 struct bnx2x *bp = netdev_priv(dev);
619c5cb6
VZ
3302 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
3303 size_t i;
ab532cf3 3304
619c5cb6
VZ
3305 /* Get the current configuration of the RSS indirection table */
3306 bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
3307
3308 /*
3309 * We can't use a memcpy() because the internal storage of the
3310 * indirection table is a u8 array while indir->ring_index
3311 * points to an array of u32.
3312 *
3313 * Indirection table contains the FW Client IDs, so we need to
3314 * align the returned table to the Client ID of the leading RSS
3315 * queue.
3316 */
7850f63f
BH
3317 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
3318 indir[i] = ind_table[i] - bp->fp->cl_id;
619c5cb6 3319
ab532cf3
TH
3320 return 0;
3321}
3322
7850f63f 3323static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
ab532cf3
TH
3324{
3325 struct bnx2x *bp = netdev_priv(dev);
3326 size_t i;
619c5cb6
VZ
3327
3328 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
619c5cb6
VZ
3329 /*
3330 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
3331 * because the internal storage of the indirection table is a u8 array
3332 * while indir->ring_index points to an array of u32.
3333 *
3334 * Indirection table contains the FW Client IDs, so we need to
3335 * align the received table to the Client ID of the leading RSS
3336 * queue.
3337 */
5d317c6a 3338 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
619c5cb6 3339 }
ab532cf3 3340
5d317c6a 3341 return bnx2x_config_rss_eth(bp, false);
ab532cf3
TH
3342}
3343
0e8d2ec5
MS
3344/**
3345 * bnx2x_get_channels - gets the number of RSS queues.
3346 *
3347 * @dev: net device
3348 * @channels: returns the maximum and current numbers of queues
3349 */
3350static void bnx2x_get_channels(struct net_device *dev,
3351 struct ethtool_channels *channels)
3352{
3353 struct bnx2x *bp = netdev_priv(dev);
3354
3355 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
3356 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
3357}
3358
3359/**
3360 * bnx2x_change_num_queues - change the number of RSS queues.
3361 *
3362 * @bp: bnx2x private structure
3363 * @num_rss: requested number of RSS (ethernet) queues
3364 * Re-configure interrupt mode to get the new number of MSI-X
3365 * vectors.
3366 */
3367static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
3368{
0e8d2ec5 3369 bnx2x_disable_msi(bp);
55c11941
MS
3370 bp->num_ethernet_queues = num_rss;
3371 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
3372 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
0e8d2ec5 3373 bnx2x_set_int_mode(bp);
0e8d2ec5
MS
3374}
3375
3376/**
3377 * bnx2x_set_channels - sets the number of RSS queues.
3378 *
3379 * @dev: net device
3380 * @channels: includes the number of queues requested
3381 */
3382static int bnx2x_set_channels(struct net_device *dev,
3383 struct ethtool_channels *channels)
3384{
3385 struct bnx2x *bp = netdev_priv(dev);
3386
3387
3388 DP(BNX2X_MSG_ETHTOOL,
3389 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
3390 channels->rx_count, channels->tx_count, channels->other_count,
3391 channels->combined_count);
3392
3393 /* We don't support separate rx / tx channels.
3394 * We don't allow setting 'other' channels.
3395 */
3396 if (channels->rx_count || channels->tx_count || channels->other_count
3397 || (channels->combined_count == 0) ||
3398 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
3399 DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
3400 return -EINVAL;
3401 }
3402
3403 /* Check if there was a change in the active parameters */
3404 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
3405 DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
3406 return 0;
3407 }
3408
3409 /* Set the requested number of queues in bp context.
3410 * Note that the actual number of queues created during load may be
3411 * less than requested if memory is low.
3412 */
3413 if (unlikely(!netif_running(dev))) {
3414 bnx2x_change_num_queues(bp, channels->combined_count);
3415 return 0;
3416 }
5d07d868 3417 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
0e8d2ec5
MS
3418 bnx2x_change_num_queues(bp, channels->combined_count);
3419 return bnx2x_nic_load(bp, LOAD_NORMAL);
3420}
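/* Illustrative userspace-side sketch (not part of the driver): the two
 * handlers above service ETHTOOL_GCHANNELS / ETHTOOL_SCHANNELS, which is
 * what `ethtool -l <dev>` and `ethtool -L <dev> combined <n>` issue.  A
 * rough direct caller, reusing the 'fd' / 'ifname' conventions from the
 * earlier EEPROM sketch, might read-modify-write like this:
 *
 *	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&ch;
 *	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
 *		ch.cmd = ETHTOOL_SCHANNELS;
 *		ch.combined_count = ch.max_combined;
 *		ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 *
 * The rx/tx/other counts stay zero, which is what bnx2x_set_channels()
 * requires since only combined channels are supported.
 */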
3421
de0c62db
DK
3422static const struct ethtool_ops bnx2x_ethtool_ops = {
3423 .get_settings = bnx2x_get_settings,
3424 .set_settings = bnx2x_set_settings,
3425 .get_drvinfo = bnx2x_get_drvinfo,
3426 .get_regs_len = bnx2x_get_regs_len,
3427 .get_regs = bnx2x_get_regs,
07ba6af4
MS
3428 .get_dump_flag = bnx2x_get_dump_flag,
3429 .get_dump_data = bnx2x_get_dump_data,
3430 .set_dump = bnx2x_set_dump,
de0c62db
DK
3431 .get_wol = bnx2x_get_wol,
3432 .set_wol = bnx2x_set_wol,
3433 .get_msglevel = bnx2x_get_msglevel,
3434 .set_msglevel = bnx2x_set_msglevel,
3435 .nway_reset = bnx2x_nway_reset,
3436 .get_link = bnx2x_get_link,
3437 .get_eeprom_len = bnx2x_get_eeprom_len,
3438 .get_eeprom = bnx2x_get_eeprom,
3439 .set_eeprom = bnx2x_set_eeprom,
3440 .get_coalesce = bnx2x_get_coalesce,
3441 .set_coalesce = bnx2x_set_coalesce,
3442 .get_ringparam = bnx2x_get_ringparam,
3443 .set_ringparam = bnx2x_set_ringparam,
3444 .get_pauseparam = bnx2x_get_pauseparam,
3445 .set_pauseparam = bnx2x_set_pauseparam,
de0c62db
DK
3446 .self_test = bnx2x_self_test,
3447 .get_sset_count = bnx2x_get_sset_count,
3448 .get_strings = bnx2x_get_strings,
32d36134 3449 .set_phys_id = bnx2x_set_phys_id,
de0c62db 3450 .get_ethtool_stats = bnx2x_get_ethtool_stats,
ab532cf3 3451 .get_rxnfc = bnx2x_get_rxnfc,
5d317c6a 3452 .set_rxnfc = bnx2x_set_rxnfc,
7850f63f 3453 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
ab532cf3
TH
3454 .get_rxfh_indir = bnx2x_get_rxfh_indir,
3455 .set_rxfh_indir = bnx2x_set_rxfh_indir,
0e8d2ec5
MS
3456 .get_channels = bnx2x_get_channels,
3457 .set_channels = bnx2x_set_channels,
24ea818e
YM
3458 .get_module_info = bnx2x_get_module_info,
3459 .get_module_eeprom = bnx2x_get_module_eeprom,
e9939c80
YM
3460 .get_eee = bnx2x_get_eee,
3461 .set_eee = bnx2x_set_eee,
be53ce1e 3462 .get_ts_info = ethtool_op_get_ts_info,
de0c62db
DK
3463};
3464
005a07ba
AE
3465static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
3466 .get_settings = bnx2x_get_settings,
3467 .set_settings = bnx2x_set_settings,
3468 .get_drvinfo = bnx2x_get_drvinfo,
3469 .get_msglevel = bnx2x_get_msglevel,
3470 .set_msglevel = bnx2x_set_msglevel,
3471 .get_link = bnx2x_get_link,
3472 .get_coalesce = bnx2x_get_coalesce,
3473 .get_ringparam = bnx2x_get_ringparam,
3474 .set_ringparam = bnx2x_set_ringparam,
3475 .get_sset_count = bnx2x_get_sset_count,
3476 .get_strings = bnx2x_get_strings,
3477 .get_ethtool_stats = bnx2x_get_ethtool_stats,
3478 .get_rxnfc = bnx2x_get_rxnfc,
3479 .set_rxnfc = bnx2x_set_rxnfc,
3480 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3481 .get_rxfh_indir = bnx2x_get_rxfh_indir,
3482 .set_rxfh_indir = bnx2x_set_rxfh_indir,
3483 .get_channels = bnx2x_get_channels,
3484 .set_channels = bnx2x_set_channels,
3485};
3486
3487void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
de0c62db 3488{
005a07ba
AE
3489 if (IS_PF(bp))
3490 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
3491 else /* vf */
3492 SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
de0c62db 3493}