bna: Move the Brocade driver
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "cna.h"

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>

#include "bna.h"

#include "bnad.h"

#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS 3
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_TXQ_COUNTERS 5

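/*
 * Number of fixed-position ethtool stats: the rtnl_link_stats64 counters,
 * the driver counters in bnad_drv_stats, and the hardware counters that
 * precede the per-function (txf/rxf) blocks in bfi_ll_stats.
 */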
#define BNAD_ETHTOOL_STATS_NUM						\
	(sizeof(struct rtnl_link_stats64) / sizeof(u64) +		\
	sizeof(struct bnad_drv_stats) / sizeof(u64) +			\
	offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))

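/*
 * Names for the fixed-position stats above; per-TxF/RxF and per-queue
 * counter names are generated at runtime in bnad_get_strings().
 */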
static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",

	"rx_length_errors",
	"rx_over_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"rx_fifo_errors",
	"rx_missed_errors",

	"tx_aborted_errors",
	"tx_carrier_errors",
	"tx_fifo_errors",
	"tx_heartbeat_errors",
	"tx_window_errors",

	"rx_compressed",
	"tx_compressed",

	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"csum_help_err",
	"hw_stats_updates",
	"netif_rx_schedule",
	"netif_rx_complete",
	"netif_rx_dropped",

	"link_toggle",
	"cee_up",

	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",

	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	"mac_tx_multiple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};

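/*
 * The link is fixed at 10G full duplex over fibre; speed and duplex are
 * reported as unknown while the carrier is down.
 */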
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported |= SUPPORTED_FIBRE;
	cmd->advertising |= ADVERTISED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(cmd, SPEED_10000);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	return 0;
}

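/*
 * Only the fixed 10G full duplex setting is accepted; autonegotiation
 * and any other speed/duplex combination are rejected.
 */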
static int
bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	/* Only the 10G full duplex setting is supported */
	if (cmd->autoneg == AUTONEG_ENABLE)
		return -EOPNOTSUPP;

	if (ethtool_cmd_speed(cmd) == SPEED_10000 && cmd->duplex == DUPLEX_FULL)
		return 0;

	return -EOPNOTSUPP;
}

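/*
 * Query the IOC for adapter attributes (under bna_lock) to report the
 * firmware version along with the driver name, version and PCI bus info.
 */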
static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	strcpy(drvinfo->driver, BNAD_NAME);
	strcpy(drvinfo->version, BNAD_VERSION);

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version) - 1);
		kfree(ioc_attr);
	}

	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
}

static void
bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
{
	wolinfo->supported = 0;
	wolinfo->wolopts = 0;
}

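/*
 * Coalescing timeouts are kept internally in units of
 * BFI_COALESCING_TIMER_UNIT; convert them back to microseconds for ethtool.
 */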
static int
bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	/* bna_lock is required to access cfg_flags */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	coalesce->use_adaptive_rx_coalesce =
		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
					BFI_COALESCING_TIMER_UNIT;
	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;

	return 0;
}

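/*
 * Validate the requested timeouts, enable or disable dynamic interrupt
 * moderation (DIM) as requested, and program the new Tx/Rx coalescing
 * values when they change.
 */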
static int
bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;
	int dim_timer_del = 0;

	if (coalesce->rx_coalesce_usecs == 0 ||
	    coalesce->rx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	if (coalesce->tx_coalesce_usecs == 0 ||
	    coalesce->tx_coalesce_usecs >
	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	/*
	 * There is no need to store rx_coalesce_usecs here: whenever DIM
	 * is disabled, it can be fetched from the stack again.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (coalesce->use_adaptive_rx_coalesce) {
		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
			bnad_dim_timer_start(bnad);
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
			dim_timer_del = bnad_dim_timer_running(bnad);
			if (dim_timer_del) {
				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
							&bnad->run_flags);
				spin_unlock_irqrestore(&bnad->bna_lock, flags);
				del_timer_sync(&bnad->dim_timer);
				spin_lock_irqsave(&bnad->bna_lock, flags);
			}
			bnad_rx_coalescing_timeo_set(bnad);
		}
	}
	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;
		bnad_tx_coalescing_timeo_set(bnad);
	}

	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
					BFI_COALESCING_TIMER_UNIT) {
		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
						BFI_COALESCING_TIMER_UNIT;

		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
			bnad_rx_coalescing_timeo_set(bnad);
	}

	/* Add Tx Inter-pkt DMA count?  */

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

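/* Report the current and maximum Rx/Tx ring depths. */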
static void
bnad_get_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
	ringparam->rx_mini_max_pending = 0;
	ringparam->rx_jumbo_max_pending = 0;
	ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;

	ringparam->rx_pending = bnad->rxq_depth;
	ringparam->rx_mini_pending = 0;
	ringparam->rx_jumbo_pending = 0;
	ringparam->tx_pending = bnad->txq_depth;
}

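/*
 * Ring depths must be powers of two within the supported range; changing
 * a depth tears down and re-creates the affected Rx or Tx paths.
 */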
static int
bnad_set_ringparam(struct net_device *netdev,
		   struct ethtool_ringparam *ringparam)
{
	int i, current_err, err = 0;
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	if (ringparam->rx_pending == bnad->rxq_depth &&
	    ringparam->tx_pending == bnad->txq_depth) {
		mutex_unlock(&bnad->conf_mutex);
		return 0;
	}

	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}
	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
	    ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
		mutex_unlock(&bnad->conf_mutex);
		return -EINVAL;
	}

	if (ringparam->rx_pending != bnad->rxq_depth) {
		bnad->rxq_depth = ringparam->rx_pending;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			bnad_cleanup_rx(bnad, i);
			current_err = bnad_setup_rx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}
	if (ringparam->tx_pending != bnad->txq_depth) {
		bnad->txq_depth = ringparam->tx_pending;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			bnad_cleanup_tx(bnad, i);
			current_err = bnad_setup_tx(bnad, i);
			if (current_err && !err)
				err = current_err;
		}
	}

	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static void
bnad_get_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);

	pauseparam->autoneg = 0;
	pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
	pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
}

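/*
 * Pause autonegotiation is not supported; the Rx/Tx pause settings are
 * applied to the port only when they actually change.
 */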
static int
bnad_set_pauseparam(struct net_device *netdev,
		    struct ethtool_pauseparam *pauseparam)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	unsigned long flags;

	if (pauseparam->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);
	if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
	    pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
		pause_config.rx_pause = pauseparam->rx_pause;
		pause_config.tx_pause = pauseparam->tx_pause;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
	mutex_unlock(&bnad->conf_mutex);
	return 0;
}

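/*
 * Emit the fixed stat names, followed by per-TxF/RxF names for the active
 * functions (from the txf/rxf bitmaps) and per-CQ/RxQ/TxQ names for the
 * configured queues, mirroring the order used by bnad_get_ethtool_stats().
 */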
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, q_num;
	u64 bmap;

	mutex_lock(&bnad->conf_mutex);

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
				   ETH_GSTRING_LEN));
			memcpy(string, bnad_net_stats_strings[i],
			       ETH_GSTRING_LEN);
			string += ETH_GSTRING_LEN;
		}
		bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
		for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
			if (bmap & 1) {
				sprintf(string, "txf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_errors", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txf%d_filter_mac_sa", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
		for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
			if (bmap & 1) {
				sprintf(string, "rxf%d_ucast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_ucast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_mcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_octets", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_bcast_vlan", i);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxf%d_frame_drops", i);
				string += ETH_GSTRING_LEN;
			}
			bmap >>= 1;
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "cq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "cq%d_hw_producer_index",
					q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_rx; i++) {
			if (!bnad->rx_info[i].rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				sprintf(string, "rxq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_packets_with_error",
								q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_allocbuf_failed", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "rxq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					sprintf(string, "rxq%d_packets", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_bytes", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string,
					"rxq%d_packets_with_error", q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_allocbuf_failed",
								q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_producer_index",
								q_num);
					string += ETH_GSTRING_LEN;
					sprintf(string, "rxq%d_consumer_index",
								q_num);
					string += ETH_GSTRING_LEN;
					q_num++;
				}
			}
		}

		q_num = 0;
		for (i = 0; i < bnad->num_tx; i++) {
			if (!bnad->tx_info[i].tx)
				continue;
			for (j = 0; j < bnad->num_txq_per_tx; j++) {
				sprintf(string, "txq%d_packets", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_bytes", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_producer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_consumer_index", q_num);
				string += ETH_GSTRING_LEN;
				sprintf(string, "txq%d_hw_consumer_index",
									q_num);
				string += ETH_GSTRING_LEN;
				q_num++;
			}
		}

		break;

	default:
		break;
	}

	mutex_unlock(&bnad->conf_mutex);
}

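/*
 * Count the stats to be reported: the fixed set plus per-function
 * counters for each active TxF/RxF and per-queue counters for each
 * configured CQ, RxQ and TxQ.
 */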
static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, count, rxf_active_num = 0, txf_active_num = 0;
	u64 bmap;

	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
		if (bmap & 1)
			txf_active_num++;
		bmap >>= 1;
	}
	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1)
			rxf_active_num++;
		bmap >>= 1;
	}
	count = BNAD_ETHTOOL_STATS_NUM +
		txf_active_num * BNAD_NUM_TXF_COUNTERS +
		rxf_active_num * BNAD_NUM_RXF_COUNTERS;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
				count += BNAD_NUM_RXQ_COUNTERS;
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
	}
	return count;
}

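/*
 * Append per-CQ, per-RxQ and per-TxQ counters to the ethtool buffer,
 * starting at index bi; returns the next free index.
 */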
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);
			}
	}
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
				bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}

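/*
 * Fill the ethtool stats buffer: netdev/hardware totals first, then the
 * driver stats, the non-per-function hardware stats, the active TxF/RxF
 * blocks and finally the per-queue counters, all under bna_lock.
 */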
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi;
	unsigned long flags;
	struct rtnl_link_stats64 *net_stats64;
	u64 *stats64;
	u64 bmap;

	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * bna_lock is used to synchronize reads of bna_stats, which is
	 * written under the same lock.
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bi = 0;
	memset(buf, 0, stats->n_stats * sizeof(u64));

	net_stats64 = (struct rtnl_link_stats64 *)buf;
	bnad_netdev_qstats_fill(bnad, net_stats64);
	bnad_netdev_hwstats_fill(bnad, net_stats64);

	bi = sizeof(*net_stats64) / sizeof(u64);

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats->txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill rxf stats into ethtool buffers */
	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats->rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

static int
bnad_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return bnad_get_stats_count_locked(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

static struct ethtool_ops bnad_ethtool_ops = {
	.get_settings = bnad_get_settings,
	.set_settings = bnad_set_settings,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count
};

void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
}