Commit | Line | Data |
---|---|---|
8b230ed8 RM |
1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | |
6 | * published by the Free Software Foundation | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | /* | |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | |
15 | * All rights reserved | |
16 | * www.brocade.com | |
17 | */ | |
18 | ||
19 | #include "cna.h" | |
20 | ||
21 | #include <linux/netdevice.h> | |
22 | #include <linux/skbuff.h> | |
23 | #include <linux/ethtool.h> | |
24 | #include <linux/rtnetlink.h> | |
25 | ||
26 | #include "bna.h" | |
27 | ||
28 | #include "bnad.h" | |
29 | ||
/* Number of per-function / per-queue counters exported through ethtool. */
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5

/*
 * Number of "fixed" ethtool statistics: the rtnl link stats, the driver
 * stats, then the hardware stats in struct bfi_enet_stats up to (but not
 * including) the per-rxf block.  All three regions are arrays of u64.
 */
#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
	 sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
	 offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
41 | static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | |
42 | "rx_packets", | |
43 | "tx_packets", | |
44 | "rx_bytes", | |
45 | "tx_bytes", | |
46 | "rx_errors", | |
47 | "tx_errors", | |
48 | "rx_dropped", | |
49 | "tx_dropped", | |
50 | "multicast", | |
51 | "collisions", | |
52 | ||
53 | "rx_length_errors", | |
54 | "rx_over_errors", | |
55 | "rx_crc_errors", | |
56 | "rx_frame_errors", | |
57 | "rx_fifo_errors", | |
58 | "rx_missed_errors", | |
59 | ||
60 | "tx_aborted_errors", | |
61 | "tx_carrier_errors", | |
62 | "tx_fifo_errors", | |
63 | "tx_heartbeat_errors", | |
64 | "tx_window_errors", | |
65 | ||
66 | "rx_compressed", | |
67 | "tx_compressed", | |
68 | ||
69 | "netif_queue_stop", | |
70 | "netif_queue_wakeup", | |
f7c0fa4c | 71 | "netif_queue_stopped", |
8b230ed8 RM |
72 | "tso4", |
73 | "tso6", | |
74 | "tso_err", | |
75 | "tcpcsum_offload", | |
76 | "udpcsum_offload", | |
77 | "csum_help", | |
78 | "csum_help_err", | |
79 | "hw_stats_updates", | |
80 | "netif_rx_schedule", | |
81 | "netif_rx_complete", | |
82 | "netif_rx_dropped", | |
83 | ||
84 | "link_toggle", | |
85 | "cee_up", | |
86 | ||
87 | "rxp_info_alloc_failed", | |
88 | "mbox_intr_disabled", | |
89 | "mbox_intr_enabled", | |
90 | "tx_unmap_q_alloc_failed", | |
91 | "rx_unmap_q_alloc_failed", | |
92 | "rxbuf_alloc_failed", | |
93 | ||
94 | "mac_frame_64", | |
95 | "mac_frame_65_127", | |
96 | "mac_frame_128_255", | |
97 | "mac_frame_256_511", | |
98 | "mac_frame_512_1023", | |
99 | "mac_frame_1024_1518", | |
100 | "mac_frame_1518_1522", | |
101 | "mac_rx_bytes", | |
102 | "mac_rx_packets", | |
103 | "mac_rx_fcs_error", | |
104 | "mac_rx_multicast", | |
105 | "mac_rx_broadcast", | |
106 | "mac_rx_control_frames", | |
107 | "mac_rx_pause", | |
108 | "mac_rx_unknown_opcode", | |
109 | "mac_rx_alignment_error", | |
110 | "mac_rx_frame_length_error", | |
111 | "mac_rx_code_error", | |
112 | "mac_rx_carrier_sense_error", | |
113 | "mac_rx_undersize", | |
114 | "mac_rx_oversize", | |
115 | "mac_rx_fragments", | |
116 | "mac_rx_jabber", | |
117 | "mac_rx_drop", | |
118 | ||
119 | "mac_tx_bytes", | |
120 | "mac_tx_packets", | |
121 | "mac_tx_multicast", | |
122 | "mac_tx_broadcast", | |
123 | "mac_tx_pause", | |
124 | "mac_tx_deferral", | |
125 | "mac_tx_excessive_deferral", | |
126 | "mac_tx_single_collision", | |
127 | "mac_tx_muliple_collision", | |
128 | "mac_tx_late_collision", | |
129 | "mac_tx_excessive_collision", | |
130 | "mac_tx_total_collision", | |
131 | "mac_tx_pause_honored", | |
132 | "mac_tx_drop", | |
133 | "mac_tx_jabber", | |
134 | "mac_tx_fcs_error", | |
135 | "mac_tx_control_frame", | |
136 | "mac_tx_oversize", | |
137 | "mac_tx_undersize", | |
138 | "mac_tx_fragments", | |
139 | ||
140 | "bpc_tx_pause_0", | |
141 | "bpc_tx_pause_1", | |
142 | "bpc_tx_pause_2", | |
143 | "bpc_tx_pause_3", | |
144 | "bpc_tx_pause_4", | |
145 | "bpc_tx_pause_5", | |
146 | "bpc_tx_pause_6", | |
147 | "bpc_tx_pause_7", | |
148 | "bpc_tx_zero_pause_0", | |
149 | "bpc_tx_zero_pause_1", | |
150 | "bpc_tx_zero_pause_2", | |
151 | "bpc_tx_zero_pause_3", | |
152 | "bpc_tx_zero_pause_4", | |
153 | "bpc_tx_zero_pause_5", | |
154 | "bpc_tx_zero_pause_6", | |
155 | "bpc_tx_zero_pause_7", | |
156 | "bpc_tx_first_pause_0", | |
157 | "bpc_tx_first_pause_1", | |
158 | "bpc_tx_first_pause_2", | |
159 | "bpc_tx_first_pause_3", | |
160 | "bpc_tx_first_pause_4", | |
161 | "bpc_tx_first_pause_5", | |
162 | "bpc_tx_first_pause_6", | |
163 | "bpc_tx_first_pause_7", | |
164 | ||
165 | "bpc_rx_pause_0", | |
166 | "bpc_rx_pause_1", | |
167 | "bpc_rx_pause_2", | |
168 | "bpc_rx_pause_3", | |
169 | "bpc_rx_pause_4", | |
170 | "bpc_rx_pause_5", | |
171 | "bpc_rx_pause_6", | |
172 | "bpc_rx_pause_7", | |
173 | "bpc_rx_zero_pause_0", | |
174 | "bpc_rx_zero_pause_1", | |
175 | "bpc_rx_zero_pause_2", | |
176 | "bpc_rx_zero_pause_3", | |
177 | "bpc_rx_zero_pause_4", | |
178 | "bpc_rx_zero_pause_5", | |
179 | "bpc_rx_zero_pause_6", | |
180 | "bpc_rx_zero_pause_7", | |
181 | "bpc_rx_first_pause_0", | |
182 | "bpc_rx_first_pause_1", | |
183 | "bpc_rx_first_pause_2", | |
184 | "bpc_rx_first_pause_3", | |
185 | "bpc_rx_first_pause_4", | |
186 | "bpc_rx_first_pause_5", | |
187 | "bpc_rx_first_pause_6", | |
188 | "bpc_rx_first_pause_7", | |
189 | ||
190 | "rad_rx_frames", | |
191 | "rad_rx_octets", | |
192 | "rad_rx_vlan_frames", | |
193 | "rad_rx_ucast", | |
194 | "rad_rx_ucast_octets", | |
195 | "rad_rx_ucast_vlan", | |
196 | "rad_rx_mcast", | |
197 | "rad_rx_mcast_octets", | |
198 | "rad_rx_mcast_vlan", | |
199 | "rad_rx_bcast", | |
200 | "rad_rx_bcast_octets", | |
201 | "rad_rx_bcast_vlan", | |
202 | "rad_rx_drops", | |
203 | ||
204 | "fc_rx_ucast_octets", | |
205 | "fc_rx_ucast", | |
206 | "fc_rx_ucast_vlan", | |
207 | "fc_rx_mcast_octets", | |
208 | "fc_rx_mcast", | |
209 | "fc_rx_mcast_vlan", | |
210 | "fc_rx_bcast_octets", | |
211 | "fc_rx_bcast", | |
212 | "fc_rx_bcast_vlan", | |
213 | ||
214 | "fc_tx_ucast_octets", | |
215 | "fc_tx_ucast", | |
216 | "fc_tx_ucast_vlan", | |
217 | "fc_tx_mcast_octets", | |
218 | "fc_tx_mcast", | |
219 | "fc_tx_mcast_vlan", | |
220 | "fc_tx_bcast_octets", | |
221 | "fc_tx_bcast", | |
222 | "fc_tx_bcast_vlan", | |
223 | "fc_tx_parity_errors", | |
224 | "fc_tx_timeout", | |
225 | "fc_tx_fid_parity_errors", | |
226 | }; | |
227 | ||
228 | static int | |
229 | bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | |
230 | { | |
231 | cmd->supported = SUPPORTED_10000baseT_Full; | |
232 | cmd->advertising = ADVERTISED_10000baseT_Full; | |
233 | cmd->autoneg = AUTONEG_DISABLE; | |
234 | cmd->supported |= SUPPORTED_FIBRE; | |
235 | cmd->advertising |= ADVERTISED_FIBRE; | |
236 | cmd->port = PORT_FIBRE; | |
237 | cmd->phy_address = 0; | |
238 | ||
239 | if (netif_carrier_ok(netdev)) { | |
70739497 | 240 | ethtool_cmd_speed_set(cmd, SPEED_10000); |
8b230ed8 RM |
241 | cmd->duplex = DUPLEX_FULL; |
242 | } else { | |
70739497 | 243 | ethtool_cmd_speed_set(cmd, -1); |
8b230ed8 RM |
244 | cmd->duplex = -1; |
245 | } | |
246 | cmd->transceiver = XCVR_EXTERNAL; | |
247 | cmd->maxtxpkt = 0; | |
248 | cmd->maxrxpkt = 0; | |
249 | ||
250 | return 0; | |
251 | } | |
252 | ||
253 | static int | |
254 | bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) | |
255 | { | |
256 | /* 10G full duplex setting supported only */ | |
257 | if (cmd->autoneg == AUTONEG_ENABLE) | |
258 | return -EOPNOTSUPP; else { | |
25db0338 DD |
259 | if ((ethtool_cmd_speed(cmd) == SPEED_10000) |
260 | && (cmd->duplex == DUPLEX_FULL)) | |
8b230ed8 RM |
261 | return 0; |
262 | } | |
263 | ||
264 | return -EOPNOTSUPP; | |
265 | } | |
266 | ||
267 | static void | |
268 | bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |
269 | { | |
270 | struct bnad *bnad = netdev_priv(netdev); | |
271 | struct bfa_ioc_attr *ioc_attr; | |
272 | unsigned long flags; | |
273 | ||
274 | strcpy(drvinfo->driver, BNAD_NAME); | |
275 | strcpy(drvinfo->version, BNAD_VERSION); | |
276 | ||
277 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); | |
278 | if (ioc_attr) { | |
8b230ed8 | 279 | spin_lock_irqsave(&bnad->bna_lock, flags); |
078086f3 | 280 | bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); |
8b230ed8 RM |
281 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
282 | ||
283 | strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, | |
284 | sizeof(drvinfo->fw_version) - 1); | |
285 | kfree(ioc_attr); | |
286 | } | |
287 | ||
288 | strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN); | |
289 | } | |
290 | ||
8b230ed8 RM |
291 | static void |
292 | bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) | |
293 | { | |
294 | wolinfo->supported = 0; | |
295 | wolinfo->wolopts = 0; | |
296 | } | |
297 | ||
298 | static int | |
299 | bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | |
300 | { | |
301 | struct bnad *bnad = netdev_priv(netdev); | |
302 | unsigned long flags; | |
303 | ||
304 | /* Lock rqd. to access bnad->bna_lock */ | |
305 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
306 | coalesce->use_adaptive_rx_coalesce = | |
307 | (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; | |
308 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
309 | ||
310 | coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * | |
311 | BFI_COALESCING_TIMER_UNIT; | |
312 | coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * | |
313 | BFI_COALESCING_TIMER_UNIT; | |
314 | coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT; | |
315 | ||
316 | return 0; | |
317 | } | |
318 | ||
319 | static int | |
320 | bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) | |
321 | { | |
322 | struct bnad *bnad = netdev_priv(netdev); | |
323 | unsigned long flags; | |
324 | int dim_timer_del = 0; | |
325 | ||
326 | if (coalesce->rx_coalesce_usecs == 0 || | |
327 | coalesce->rx_coalesce_usecs > | |
328 | BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) | |
329 | return -EINVAL; | |
330 | ||
331 | if (coalesce->tx_coalesce_usecs == 0 || | |
332 | coalesce->tx_coalesce_usecs > | |
333 | BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) | |
334 | return -EINVAL; | |
335 | ||
336 | mutex_lock(&bnad->conf_mutex); | |
337 | /* | |
338 | * Do not need to store rx_coalesce_usecs here | |
339 | * Every time DIM is disabled, we can get it from the | |
340 | * stack. | |
341 | */ | |
342 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
343 | if (coalesce->use_adaptive_rx_coalesce) { | |
344 | if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { | |
345 | bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; | |
346 | bnad_dim_timer_start(bnad); | |
347 | } | |
348 | } else { | |
349 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { | |
350 | bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; | |
351 | dim_timer_del = bnad_dim_timer_running(bnad); | |
352 | if (dim_timer_del) { | |
353 | clear_bit(BNAD_RF_DIM_TIMER_RUNNING, | |
354 | &bnad->run_flags); | |
355 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
356 | del_timer_sync(&bnad->dim_timer); | |
357 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
358 | } | |
359 | bnad_rx_coalescing_timeo_set(bnad); | |
360 | } | |
361 | } | |
362 | if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / | |
363 | BFI_COALESCING_TIMER_UNIT) { | |
364 | bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / | |
365 | BFI_COALESCING_TIMER_UNIT; | |
366 | bnad_tx_coalescing_timeo_set(bnad); | |
367 | } | |
368 | ||
369 | if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / | |
370 | BFI_COALESCING_TIMER_UNIT) { | |
371 | bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / | |
372 | BFI_COALESCING_TIMER_UNIT; | |
373 | ||
374 | if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) | |
375 | bnad_rx_coalescing_timeo_set(bnad); | |
376 | ||
377 | } | |
378 | ||
379 | /* Add Tx Inter-pkt DMA count? */ | |
380 | ||
381 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
382 | ||
383 | mutex_unlock(&bnad->conf_mutex); | |
384 | return 0; | |
385 | } | |
386 | ||
387 | static void | |
388 | bnad_get_ringparam(struct net_device *netdev, | |
389 | struct ethtool_ringparam *ringparam) | |
390 | { | |
391 | struct bnad *bnad = netdev_priv(netdev); | |
392 | ||
393 | ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq; | |
394 | ringparam->rx_mini_max_pending = 0; | |
395 | ringparam->rx_jumbo_max_pending = 0; | |
396 | ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH; | |
397 | ||
398 | ringparam->rx_pending = bnad->rxq_depth; | |
399 | ringparam->rx_mini_max_pending = 0; | |
400 | ringparam->rx_jumbo_max_pending = 0; | |
401 | ringparam->tx_pending = bnad->txq_depth; | |
402 | } | |
403 | ||
404 | static int | |
405 | bnad_set_ringparam(struct net_device *netdev, | |
406 | struct ethtool_ringparam *ringparam) | |
407 | { | |
408 | int i, current_err, err = 0; | |
409 | struct bnad *bnad = netdev_priv(netdev); | |
410 | ||
411 | mutex_lock(&bnad->conf_mutex); | |
412 | if (ringparam->rx_pending == bnad->rxq_depth && | |
413 | ringparam->tx_pending == bnad->txq_depth) { | |
414 | mutex_unlock(&bnad->conf_mutex); | |
415 | return 0; | |
416 | } | |
417 | ||
418 | if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || | |
419 | ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq || | |
420 | !BNA_POWER_OF_2(ringparam->rx_pending)) { | |
421 | mutex_unlock(&bnad->conf_mutex); | |
422 | return -EINVAL; | |
423 | } | |
424 | if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || | |
425 | ringparam->tx_pending > BNAD_MAX_Q_DEPTH || | |
426 | !BNA_POWER_OF_2(ringparam->tx_pending)) { | |
427 | mutex_unlock(&bnad->conf_mutex); | |
428 | return -EINVAL; | |
429 | } | |
430 | ||
431 | if (ringparam->rx_pending != bnad->rxq_depth) { | |
432 | bnad->rxq_depth = ringparam->rx_pending; | |
433 | for (i = 0; i < bnad->num_rx; i++) { | |
434 | if (!bnad->rx_info[i].rx) | |
435 | continue; | |
436 | bnad_cleanup_rx(bnad, i); | |
437 | current_err = bnad_setup_rx(bnad, i); | |
438 | if (current_err && !err) | |
439 | err = current_err; | |
440 | } | |
441 | } | |
442 | if (ringparam->tx_pending != bnad->txq_depth) { | |
443 | bnad->txq_depth = ringparam->tx_pending; | |
444 | for (i = 0; i < bnad->num_tx; i++) { | |
445 | if (!bnad->tx_info[i].tx) | |
446 | continue; | |
447 | bnad_cleanup_tx(bnad, i); | |
448 | current_err = bnad_setup_tx(bnad, i); | |
449 | if (current_err && !err) | |
450 | err = current_err; | |
451 | } | |
452 | } | |
453 | ||
454 | mutex_unlock(&bnad->conf_mutex); | |
455 | return err; | |
456 | } | |
457 | ||
458 | static void | |
459 | bnad_get_pauseparam(struct net_device *netdev, | |
460 | struct ethtool_pauseparam *pauseparam) | |
461 | { | |
462 | struct bnad *bnad = netdev_priv(netdev); | |
463 | ||
464 | pauseparam->autoneg = 0; | |
078086f3 RM |
465 | pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; |
466 | pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; | |
8b230ed8 RM |
467 | } |
468 | ||
469 | static int | |
470 | bnad_set_pauseparam(struct net_device *netdev, | |
471 | struct ethtool_pauseparam *pauseparam) | |
472 | { | |
473 | struct bnad *bnad = netdev_priv(netdev); | |
474 | struct bna_pause_config pause_config; | |
475 | unsigned long flags; | |
476 | ||
477 | if (pauseparam->autoneg == AUTONEG_ENABLE) | |
478 | return -EINVAL; | |
479 | ||
480 | mutex_lock(&bnad->conf_mutex); | |
078086f3 RM |
481 | if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || |
482 | pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { | |
8b230ed8 RM |
483 | pause_config.rx_pause = pauseparam->rx_pause; |
484 | pause_config.tx_pause = pauseparam->tx_pause; | |
485 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 486 | bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); |
8b230ed8 RM |
487 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
488 | } | |
489 | mutex_unlock(&bnad->conf_mutex); | |
490 | return 0; | |
491 | } | |
492 | ||
8b230ed8 RM |
493 | static void |
494 | bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) | |
495 | { | |
496 | struct bnad *bnad = netdev_priv(netdev); | |
497 | int i, j, q_num; | |
078086f3 | 498 | u32 bmap; |
8b230ed8 RM |
499 | |
500 | mutex_lock(&bnad->conf_mutex); | |
501 | ||
502 | switch (stringset) { | |
503 | case ETH_SS_STATS: | |
504 | for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { | |
505 | BUG_ON(!(strlen(bnad_net_stats_strings[i]) < | |
506 | ETH_GSTRING_LEN)); | |
507 | memcpy(string, bnad_net_stats_strings[i], | |
508 | ETH_GSTRING_LEN); | |
509 | string += ETH_GSTRING_LEN; | |
510 | } | |
078086f3 RM |
511 | bmap = bna_tx_rid_mask(&bnad->bna); |
512 | for (i = 0; bmap; i++) { | |
8b230ed8 RM |
513 | if (bmap & 1) { |
514 | sprintf(string, "txf%d_ucast_octets", i); | |
515 | string += ETH_GSTRING_LEN; | |
516 | sprintf(string, "txf%d_ucast", i); | |
517 | string += ETH_GSTRING_LEN; | |
518 | sprintf(string, "txf%d_ucast_vlan", i); | |
519 | string += ETH_GSTRING_LEN; | |
520 | sprintf(string, "txf%d_mcast_octets", i); | |
521 | string += ETH_GSTRING_LEN; | |
522 | sprintf(string, "txf%d_mcast", i); | |
523 | string += ETH_GSTRING_LEN; | |
524 | sprintf(string, "txf%d_mcast_vlan", i); | |
525 | string += ETH_GSTRING_LEN; | |
526 | sprintf(string, "txf%d_bcast_octets", i); | |
527 | string += ETH_GSTRING_LEN; | |
528 | sprintf(string, "txf%d_bcast", i); | |
529 | string += ETH_GSTRING_LEN; | |
530 | sprintf(string, "txf%d_bcast_vlan", i); | |
531 | string += ETH_GSTRING_LEN; | |
532 | sprintf(string, "txf%d_errors", i); | |
533 | string += ETH_GSTRING_LEN; | |
534 | sprintf(string, "txf%d_filter_vlan", i); | |
535 | string += ETH_GSTRING_LEN; | |
536 | sprintf(string, "txf%d_filter_mac_sa", i); | |
537 | string += ETH_GSTRING_LEN; | |
538 | } | |
539 | bmap >>= 1; | |
540 | } | |
541 | ||
078086f3 RM |
542 | bmap = bna_rx_rid_mask(&bnad->bna); |
543 | for (i = 0; bmap; i++) { | |
8b230ed8 RM |
544 | if (bmap & 1) { |
545 | sprintf(string, "rxf%d_ucast_octets", i); | |
546 | string += ETH_GSTRING_LEN; | |
547 | sprintf(string, "rxf%d_ucast", i); | |
548 | string += ETH_GSTRING_LEN; | |
549 | sprintf(string, "rxf%d_ucast_vlan", i); | |
550 | string += ETH_GSTRING_LEN; | |
551 | sprintf(string, "rxf%d_mcast_octets", i); | |
552 | string += ETH_GSTRING_LEN; | |
553 | sprintf(string, "rxf%d_mcast", i); | |
554 | string += ETH_GSTRING_LEN; | |
555 | sprintf(string, "rxf%d_mcast_vlan", i); | |
556 | string += ETH_GSTRING_LEN; | |
557 | sprintf(string, "rxf%d_bcast_octets", i); | |
558 | string += ETH_GSTRING_LEN; | |
559 | sprintf(string, "rxf%d_bcast", i); | |
560 | string += ETH_GSTRING_LEN; | |
561 | sprintf(string, "rxf%d_bcast_vlan", i); | |
562 | string += ETH_GSTRING_LEN; | |
563 | sprintf(string, "rxf%d_frame_drops", i); | |
564 | string += ETH_GSTRING_LEN; | |
565 | } | |
566 | bmap >>= 1; | |
567 | } | |
568 | ||
569 | q_num = 0; | |
570 | for (i = 0; i < bnad->num_rx; i++) { | |
571 | if (!bnad->rx_info[i].rx) | |
572 | continue; | |
573 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
574 | sprintf(string, "cq%d_producer_index", q_num); | |
575 | string += ETH_GSTRING_LEN; | |
576 | sprintf(string, "cq%d_consumer_index", q_num); | |
577 | string += ETH_GSTRING_LEN; | |
578 | sprintf(string, "cq%d_hw_producer_index", | |
579 | q_num); | |
580 | string += ETH_GSTRING_LEN; | |
581 | q_num++; | |
582 | } | |
583 | } | |
584 | ||
585 | q_num = 0; | |
586 | for (i = 0; i < bnad->num_rx; i++) { | |
587 | if (!bnad->rx_info[i].rx) | |
588 | continue; | |
589 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
590 | sprintf(string, "rxq%d_packets", q_num); | |
591 | string += ETH_GSTRING_LEN; | |
592 | sprintf(string, "rxq%d_bytes", q_num); | |
593 | string += ETH_GSTRING_LEN; | |
594 | sprintf(string, "rxq%d_packets_with_error", | |
595 | q_num); | |
596 | string += ETH_GSTRING_LEN; | |
597 | sprintf(string, "rxq%d_allocbuf_failed", q_num); | |
598 | string += ETH_GSTRING_LEN; | |
599 | sprintf(string, "rxq%d_producer_index", q_num); | |
600 | string += ETH_GSTRING_LEN; | |
601 | sprintf(string, "rxq%d_consumer_index", q_num); | |
602 | string += ETH_GSTRING_LEN; | |
603 | q_num++; | |
604 | if (bnad->rx_info[i].rx_ctrl[j].ccb && | |
605 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
606 | rcb[1] && | |
607 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
608 | rcb[1]->rxq) { | |
609 | sprintf(string, "rxq%d_packets", q_num); | |
610 | string += ETH_GSTRING_LEN; | |
611 | sprintf(string, "rxq%d_bytes", q_num); | |
612 | string += ETH_GSTRING_LEN; | |
613 | sprintf(string, | |
614 | "rxq%d_packets_with_error", q_num); | |
615 | string += ETH_GSTRING_LEN; | |
616 | sprintf(string, "rxq%d_allocbuf_failed", | |
617 | q_num); | |
618 | string += ETH_GSTRING_LEN; | |
619 | sprintf(string, "rxq%d_producer_index", | |
620 | q_num); | |
621 | string += ETH_GSTRING_LEN; | |
622 | sprintf(string, "rxq%d_consumer_index", | |
623 | q_num); | |
624 | string += ETH_GSTRING_LEN; | |
625 | q_num++; | |
626 | } | |
627 | } | |
628 | } | |
629 | ||
630 | q_num = 0; | |
631 | for (i = 0; i < bnad->num_tx; i++) { | |
632 | if (!bnad->tx_info[i].tx) | |
633 | continue; | |
634 | for (j = 0; j < bnad->num_txq_per_tx; j++) { | |
635 | sprintf(string, "txq%d_packets", q_num); | |
636 | string += ETH_GSTRING_LEN; | |
637 | sprintf(string, "txq%d_bytes", q_num); | |
638 | string += ETH_GSTRING_LEN; | |
639 | sprintf(string, "txq%d_producer_index", q_num); | |
640 | string += ETH_GSTRING_LEN; | |
641 | sprintf(string, "txq%d_consumer_index", q_num); | |
642 | string += ETH_GSTRING_LEN; | |
643 | sprintf(string, "txq%d_hw_consumer_index", | |
644 | q_num); | |
645 | string += ETH_GSTRING_LEN; | |
646 | q_num++; | |
647 | } | |
648 | } | |
649 | ||
650 | break; | |
651 | ||
652 | default: | |
653 | break; | |
654 | } | |
655 | ||
656 | mutex_unlock(&bnad->conf_mutex); | |
657 | } | |
658 | ||
659 | static int | |
660 | bnad_get_stats_count_locked(struct net_device *netdev) | |
661 | { | |
662 | struct bnad *bnad = netdev_priv(netdev); | |
663 | int i, j, count, rxf_active_num = 0, txf_active_num = 0; | |
078086f3 | 664 | u32 bmap; |
8b230ed8 | 665 | |
078086f3 RM |
666 | bmap = bna_tx_rid_mask(&bnad->bna); |
667 | for (i = 0; bmap; i++) { | |
8b230ed8 RM |
668 | if (bmap & 1) |
669 | txf_active_num++; | |
670 | bmap >>= 1; | |
671 | } | |
078086f3 RM |
672 | bmap = bna_rx_rid_mask(&bnad->bna); |
673 | for (i = 0; bmap; i++) { | |
8b230ed8 RM |
674 | if (bmap & 1) |
675 | rxf_active_num++; | |
676 | bmap >>= 1; | |
677 | } | |
678 | count = BNAD_ETHTOOL_STATS_NUM + | |
679 | txf_active_num * BNAD_NUM_TXF_COUNTERS + | |
680 | rxf_active_num * BNAD_NUM_RXF_COUNTERS; | |
681 | ||
682 | for (i = 0; i < bnad->num_rx; i++) { | |
683 | if (!bnad->rx_info[i].rx) | |
684 | continue; | |
685 | count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; | |
686 | count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; | |
687 | for (j = 0; j < bnad->num_rxp_per_rx; j++) | |
688 | if (bnad->rx_info[i].rx_ctrl[j].ccb && | |
689 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
690 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) | |
691 | count += BNAD_NUM_RXQ_COUNTERS; | |
692 | } | |
693 | ||
694 | for (i = 0; i < bnad->num_tx; i++) { | |
695 | if (!bnad->tx_info[i].tx) | |
696 | continue; | |
697 | count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; | |
698 | } | |
699 | return count; | |
700 | } | |
701 | ||
702 | static int | |
703 | bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) | |
704 | { | |
705 | int i, j; | |
706 | struct bna_rcb *rcb = NULL; | |
707 | struct bna_tcb *tcb = NULL; | |
708 | ||
709 | for (i = 0; i < bnad->num_rx; i++) { | |
710 | if (!bnad->rx_info[i].rx) | |
711 | continue; | |
712 | for (j = 0; j < bnad->num_rxp_per_rx; j++) | |
713 | if (bnad->rx_info[i].rx_ctrl[j].ccb && | |
714 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && | |
715 | bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { | |
716 | buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. | |
717 | ccb->producer_index; | |
718 | buf[bi++] = 0; /* ccb->consumer_index */ | |
719 | buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. | |
720 | ccb->hw_producer_index); | |
721 | } | |
722 | } | |
723 | for (i = 0; i < bnad->num_rx; i++) { | |
724 | if (!bnad->rx_info[i].rx) | |
725 | continue; | |
726 | for (j = 0; j < bnad->num_rxp_per_rx; j++) | |
727 | if (bnad->rx_info[i].rx_ctrl[j].ccb) { | |
728 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && | |
729 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
730 | rcb[0]->rxq) { | |
731 | rcb = bnad->rx_info[i].rx_ctrl[j]. | |
732 | ccb->rcb[0]; | |
733 | buf[bi++] = rcb->rxq->rx_packets; | |
734 | buf[bi++] = rcb->rxq->rx_bytes; | |
735 | buf[bi++] = rcb->rxq-> | |
736 | rx_packets_with_error; | |
737 | buf[bi++] = rcb->rxq-> | |
738 | rxbuf_alloc_failed; | |
739 | buf[bi++] = rcb->producer_index; | |
740 | buf[bi++] = rcb->consumer_index; | |
741 | } | |
742 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
743 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
744 | rcb[1]->rxq) { | |
745 | rcb = bnad->rx_info[i].rx_ctrl[j]. | |
746 | ccb->rcb[1]; | |
747 | buf[bi++] = rcb->rxq->rx_packets; | |
748 | buf[bi++] = rcb->rxq->rx_bytes; | |
749 | buf[bi++] = rcb->rxq-> | |
750 | rx_packets_with_error; | |
751 | buf[bi++] = rcb->rxq-> | |
752 | rxbuf_alloc_failed; | |
753 | buf[bi++] = rcb->producer_index; | |
754 | buf[bi++] = rcb->consumer_index; | |
755 | } | |
756 | } | |
757 | } | |
758 | ||
759 | for (i = 0; i < bnad->num_tx; i++) { | |
760 | if (!bnad->tx_info[i].tx) | |
761 | continue; | |
762 | for (j = 0; j < bnad->num_txq_per_tx; j++) | |
763 | if (bnad->tx_info[i].tcb[j] && | |
764 | bnad->tx_info[i].tcb[j]->txq) { | |
765 | tcb = bnad->tx_info[i].tcb[j]; | |
766 | buf[bi++] = tcb->txq->tx_packets; | |
767 | buf[bi++] = tcb->txq->tx_bytes; | |
768 | buf[bi++] = tcb->producer_index; | |
769 | buf[bi++] = tcb->consumer_index; | |
770 | buf[bi++] = *(tcb->hw_consumer_index); | |
771 | } | |
772 | } | |
773 | ||
774 | return bi; | |
775 | } | |
776 | ||
777 | static void | |
778 | bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, | |
779 | u64 *buf) | |
780 | { | |
781 | struct bnad *bnad = netdev_priv(netdev); | |
782 | int i, j, bi; | |
250e061e ED |
783 | unsigned long flags; |
784 | struct rtnl_link_stats64 *net_stats64; | |
8b230ed8 | 785 | u64 *stats64; |
078086f3 | 786 | u32 bmap; |
8b230ed8 RM |
787 | |
788 | mutex_lock(&bnad->conf_mutex); | |
789 | if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { | |
790 | mutex_unlock(&bnad->conf_mutex); | |
791 | return; | |
792 | } | |
793 | ||
794 | /* | |
795 | * Used bna_lock to sync reads from bna_stats, which is written | |
796 | * under the same lock | |
797 | */ | |
798 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
799 | bi = 0; | |
800 | memset(buf, 0, stats->n_stats * sizeof(u64)); | |
8b230ed8 | 801 | |
250e061e ED |
802 | net_stats64 = (struct rtnl_link_stats64 *)buf; |
803 | bnad_netdev_qstats_fill(bnad, net_stats64); | |
804 | bnad_netdev_hwstats_fill(bnad, net_stats64); | |
8b230ed8 | 805 | |
250e061e | 806 | bi = sizeof(*net_stats64) / sizeof(u64); |
8b230ed8 | 807 | |
f7c0fa4c RM |
808 | /* Get netif_queue_stopped from stack */ |
809 | bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); | |
810 | ||
8b230ed8 RM |
811 | /* Fill driver stats into ethtool buffers */ |
812 | stats64 = (u64 *)&bnad->stats.drv_stats; | |
813 | for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) | |
814 | buf[bi++] = stats64[i]; | |
815 | ||
816 | /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ | |
078086f3 | 817 | stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; |
8b230ed8 | 818 | for (i = 0; |
078086f3 RM |
819 | i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / |
820 | sizeof(u64); | |
8b230ed8 RM |
821 | i++) |
822 | buf[bi++] = stats64[i]; | |
823 | ||
824 | /* Fill txf stats into ethtool buffers */ | |
078086f3 RM |
825 | bmap = bna_tx_rid_mask(&bnad->bna); |
826 | for (i = 0; bmap; i++) { | |
8b230ed8 RM |
827 | if (bmap & 1) { |
828 | stats64 = (u64 *)&bnad->stats.bna_stats-> | |
078086f3 RM |
829 | hw_stats.txf_stats[i]; |
830 | for (j = 0; j < sizeof(struct bfi_enet_stats_txf) / | |
8b230ed8 RM |
831 | sizeof(u64); j++) |
832 | buf[bi++] = stats64[j]; | |
833 | } | |
834 | bmap >>= 1; | |
835 | } | |
836 | ||
837 | /* Fill rxf stats into ethtool buffers */ | |
078086f3 RM |
838 | bmap = bna_rx_rid_mask(&bnad->bna); |
839 | for (i = 0; bmap; i++) { | |
8b230ed8 RM |
840 | if (bmap & 1) { |
841 | stats64 = (u64 *)&bnad->stats.bna_stats-> | |
078086f3 RM |
842 | hw_stats.rxf_stats[i]; |
843 | for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) / | |
8b230ed8 RM |
844 | sizeof(u64); j++) |
845 | buf[bi++] = stats64[j]; | |
846 | } | |
847 | bmap >>= 1; | |
848 | } | |
849 | ||
850 | /* Fill per Q stats into ethtool buffers */ | |
851 | bi = bnad_per_q_stats_fill(bnad, buf, bi); | |
852 | ||
853 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
854 | ||
855 | mutex_unlock(&bnad->conf_mutex); | |
856 | } | |
857 | ||
858 | static int | |
859 | bnad_get_sset_count(struct net_device *netdev, int sset) | |
860 | { | |
861 | switch (sset) { | |
862 | case ETH_SS_STATS: | |
863 | return bnad_get_stats_count_locked(netdev); | |
864 | default: | |
865 | return -EOPNOTSUPP; | |
866 | } | |
867 | } | |
868 | ||
869 | static struct ethtool_ops bnad_ethtool_ops = { | |
870 | .get_settings = bnad_get_settings, | |
871 | .set_settings = bnad_set_settings, | |
872 | .get_drvinfo = bnad_get_drvinfo, | |
8b230ed8 RM |
873 | .get_wol = bnad_get_wol, |
874 | .get_link = ethtool_op_get_link, | |
875 | .get_coalesce = bnad_get_coalesce, | |
876 | .set_coalesce = bnad_set_coalesce, | |
877 | .get_ringparam = bnad_get_ringparam, | |
878 | .set_ringparam = bnad_set_ringparam, | |
879 | .get_pauseparam = bnad_get_pauseparam, | |
880 | .set_pauseparam = bnad_set_pauseparam, | |
8b230ed8 RM |
881 | .get_strings = bnad_get_strings, |
882 | .get_ethtool_stats = bnad_get_ethtool_stats, | |
883 | .get_sset_count = bnad_get_sset_count | |
884 | }; | |
885 | ||
886 | void | |
887 | bnad_set_ethtool_ops(struct net_device *netdev) | |
888 | { | |
889 | SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops); | |
890 | } |