/*
 * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
25 #include "enic_clsf.h"
27 #include "vnic_stats.h"
30 char name[ETH_GSTRING_LEN];
34 #define ENIC_TX_STAT(stat) { \
36 .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
39 #define ENIC_RX_STAT(stat) { \
41 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
44 #define ENIC_GEN_STAT(stat) { \
46 .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
49 static const struct enic_stat enic_tx_stats[] = {
50 ENIC_TX_STAT(tx_frames_ok),
51 ENIC_TX_STAT(tx_unicast_frames_ok),
52 ENIC_TX_STAT(tx_multicast_frames_ok),
53 ENIC_TX_STAT(tx_broadcast_frames_ok),
54 ENIC_TX_STAT(tx_bytes_ok),
55 ENIC_TX_STAT(tx_unicast_bytes_ok),
56 ENIC_TX_STAT(tx_multicast_bytes_ok),
57 ENIC_TX_STAT(tx_broadcast_bytes_ok),
58 ENIC_TX_STAT(tx_drops),
59 ENIC_TX_STAT(tx_errors),
63 static const struct enic_stat enic_rx_stats[] = {
64 ENIC_RX_STAT(rx_frames_ok),
65 ENIC_RX_STAT(rx_frames_total),
66 ENIC_RX_STAT(rx_unicast_frames_ok),
67 ENIC_RX_STAT(rx_multicast_frames_ok),
68 ENIC_RX_STAT(rx_broadcast_frames_ok),
69 ENIC_RX_STAT(rx_bytes_ok),
70 ENIC_RX_STAT(rx_unicast_bytes_ok),
71 ENIC_RX_STAT(rx_multicast_bytes_ok),
72 ENIC_RX_STAT(rx_broadcast_bytes_ok),
73 ENIC_RX_STAT(rx_drop),
74 ENIC_RX_STAT(rx_no_bufs),
75 ENIC_RX_STAT(rx_errors),
77 ENIC_RX_STAT(rx_crc_errors),
78 ENIC_RX_STAT(rx_frames_64),
79 ENIC_RX_STAT(rx_frames_127),
80 ENIC_RX_STAT(rx_frames_255),
81 ENIC_RX_STAT(rx_frames_511),
82 ENIC_RX_STAT(rx_frames_1023),
83 ENIC_RX_STAT(rx_frames_1518),
84 ENIC_RX_STAT(rx_frames_to_max),
87 static const struct enic_stat enic_gen_stats[] = {
88 ENIC_GEN_STAT(dma_map_error),
91 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
92 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
93 static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
95 static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
100 for (i = 0; i < enic->rq_count; i++) {
101 intr = enic_msix_rq_intr(enic, i);
102 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
106 static int enic_get_settings(struct net_device *netdev,
107 struct ethtool_cmd *ecmd)
109 struct enic *enic = netdev_priv(netdev);
111 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
112 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
113 ecmd->port = PORT_FIBRE;
114 ecmd->transceiver = XCVR_EXTERNAL;
116 if (netif_carrier_ok(netdev)) {
117 ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
118 ecmd->duplex = DUPLEX_FULL;
120 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
121 ecmd->duplex = DUPLEX_UNKNOWN;
124 ecmd->autoneg = AUTONEG_DISABLE;
129 static void enic_get_drvinfo(struct net_device *netdev,
130 struct ethtool_drvinfo *drvinfo)
132 struct enic *enic = netdev_priv(netdev);
133 struct vnic_devcmd_fw_info *fw_info;
136 err = enic_dev_fw_info(enic, &fw_info);
137 /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
138 * For other failures, like devcmd failure, we return previously
144 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
145 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
146 strlcpy(drvinfo->fw_version, fw_info->fw_version,
147 sizeof(drvinfo->fw_version));
148 strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
149 sizeof(drvinfo->bus_info));
152 static void enic_get_strings(struct net_device *netdev, u32 stringset,
159 for (i = 0; i < enic_n_tx_stats; i++) {
160 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
161 data += ETH_GSTRING_LEN;
163 for (i = 0; i < enic_n_rx_stats; i++) {
164 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
165 data += ETH_GSTRING_LEN;
167 for (i = 0; i < enic_n_gen_stats; i++) {
168 memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
169 data += ETH_GSTRING_LEN;
175 static int enic_get_sset_count(struct net_device *netdev, int sset)
179 return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
185 static void enic_get_ethtool_stats(struct net_device *netdev,
186 struct ethtool_stats *stats, u64 *data)
188 struct enic *enic = netdev_priv(netdev);
189 struct vnic_stats *vstats;
193 err = enic_dev_stats_dump(enic, &vstats);
194 /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
195 * For other failures, like devcmd failure, we return previously
201 for (i = 0; i < enic_n_tx_stats; i++)
202 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
203 for (i = 0; i < enic_n_rx_stats; i++)
204 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
205 for (i = 0; i < enic_n_gen_stats; i++)
206 *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
209 static u32 enic_get_msglevel(struct net_device *netdev)
211 struct enic *enic = netdev_priv(netdev);
212 return enic->msg_enable;
215 static void enic_set_msglevel(struct net_device *netdev, u32 value)
217 struct enic *enic = netdev_priv(netdev);
218 enic->msg_enable = value;
221 static int enic_get_coalesce(struct net_device *netdev,
222 struct ethtool_coalesce *ecmd)
224 struct enic *enic = netdev_priv(netdev);
225 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
227 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
228 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
229 if (rxcoal->use_adaptive_rx_coalesce)
230 ecmd->use_adaptive_rx_coalesce = 1;
231 ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
232 ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
237 static int enic_set_coalesce(struct net_device *netdev,
238 struct ethtool_coalesce *ecmd)
240 struct enic *enic = netdev_priv(netdev);
241 u32 tx_coalesce_usecs;
242 u32 rx_coalesce_usecs;
243 u32 rx_coalesce_usecs_low;
244 u32 rx_coalesce_usecs_high;
245 u32 coalesce_usecs_max;
246 unsigned int i, intr;
247 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
249 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
250 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
252 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
255 rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
257 rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
260 switch (vnic_dev_get_intr_mode(enic->vdev)) {
261 case VNIC_DEV_INTR_MODE_INTX:
262 if (tx_coalesce_usecs != rx_coalesce_usecs)
264 if (ecmd->use_adaptive_rx_coalesce ||
265 ecmd->rx_coalesce_usecs_low ||
266 ecmd->rx_coalesce_usecs_high)
269 intr = enic_legacy_io_intr();
270 vnic_intr_coalescing_timer_set(&enic->intr[intr],
273 case VNIC_DEV_INTR_MODE_MSI:
274 if (tx_coalesce_usecs != rx_coalesce_usecs)
276 if (ecmd->use_adaptive_rx_coalesce ||
277 ecmd->rx_coalesce_usecs_low ||
278 ecmd->rx_coalesce_usecs_high)
281 vnic_intr_coalescing_timer_set(&enic->intr[0],
284 case VNIC_DEV_INTR_MODE_MSIX:
285 if (ecmd->rx_coalesce_usecs_high &&
286 (rx_coalesce_usecs_high <
287 rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
290 for (i = 0; i < enic->wq_count; i++) {
291 intr = enic_msix_wq_intr(enic, i);
292 vnic_intr_coalescing_timer_set(&enic->intr[intr],
296 rxcoal->use_adaptive_rx_coalesce =
297 !!ecmd->use_adaptive_rx_coalesce;
298 if (!rxcoal->use_adaptive_rx_coalesce)
299 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
301 if (ecmd->rx_coalesce_usecs_high) {
302 rxcoal->range_end = rx_coalesce_usecs_high;
303 rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
304 rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
305 ENIC_AIC_LARGE_PKT_DIFF;
312 enic->tx_coalesce_usecs = tx_coalesce_usecs;
313 enic->rx_coalesce_usecs = rx_coalesce_usecs;
318 static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
321 int j, ret = 0, cnt = 0;
323 cmd->data = enic->rfs_h.max - enic->rfs_h.free;
324 for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
325 struct hlist_head *hhead;
326 struct hlist_node *tmp;
327 struct enic_rfs_fltr_node *n;
329 hhead = &enic->rfs_h.ht_head[j];
330 hlist_for_each_entry_safe(n, tmp, hhead, node) {
331 if (cnt == cmd->rule_cnt)
333 rule_locs[cnt] = n->fltr_id;
342 static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
344 struct ethtool_rx_flow_spec *fsp =
345 (struct ethtool_rx_flow_spec *)&cmd->fs;
346 struct enic_rfs_fltr_node *n;
348 n = htbl_fltr_search(enic, (u16)fsp->location);
351 switch (n->keys.basic.ip_proto) {
353 fsp->flow_type = TCP_V4_FLOW;
356 fsp->flow_type = UDP_V4_FLOW;
363 fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
364 fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
366 fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
367 fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
369 fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
370 fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
372 fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
373 fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
375 fsp->ring_cookie = n->rq_id;
380 static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
383 struct enic *enic = netdev_priv(dev);
387 case ETHTOOL_GRXRINGS:
388 cmd->data = enic->rq_count;
390 case ETHTOOL_GRXCLSRLCNT:
391 spin_lock_bh(&enic->rfs_h.lock);
392 cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
393 cmd->data = enic->rfs_h.max;
394 spin_unlock_bh(&enic->rfs_h.lock);
396 case ETHTOOL_GRXCLSRLALL:
397 spin_lock_bh(&enic->rfs_h.lock);
398 ret = enic_grxclsrlall(enic, cmd, rule_locs);
399 spin_unlock_bh(&enic->rfs_h.lock);
401 case ETHTOOL_GRXCLSRULE:
402 spin_lock_bh(&enic->rfs_h.lock);
403 ret = enic_grxclsrule(enic, cmd);
404 spin_unlock_bh(&enic->rfs_h.lock);
414 static int enic_get_tunable(struct net_device *dev,
415 const struct ethtool_tunable *tuna, void *data)
417 struct enic *enic = netdev_priv(dev);
421 case ETHTOOL_RX_COPYBREAK:
422 *(u32 *)data = enic->rx_copybreak;
432 static int enic_set_tunable(struct net_device *dev,
433 const struct ethtool_tunable *tuna,
436 struct enic *enic = netdev_priv(dev);
440 case ETHTOOL_RX_COPYBREAK:
441 enic->rx_copybreak = *(u32 *)data;
451 static u32 enic_get_rxfh_key_size(struct net_device *netdev)
456 static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
459 struct enic *enic = netdev_priv(netdev);
462 memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
465 *hfunc = ETH_RSS_HASH_TOP;
470 static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
471 const u8 *hkey, const u8 hfunc)
473 struct enic *enic = netdev_priv(netdev);
475 if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
480 memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
482 return __enic_set_rsskey(enic);
485 static const struct ethtool_ops enic_ethtool_ops = {
486 .get_settings = enic_get_settings,
487 .get_drvinfo = enic_get_drvinfo,
488 .get_msglevel = enic_get_msglevel,
489 .set_msglevel = enic_set_msglevel,
490 .get_link = ethtool_op_get_link,
491 .get_strings = enic_get_strings,
492 .get_sset_count = enic_get_sset_count,
493 .get_ethtool_stats = enic_get_ethtool_stats,
494 .get_coalesce = enic_get_coalesce,
495 .set_coalesce = enic_set_coalesce,
496 .get_rxnfc = enic_get_rxnfc,
497 .get_tunable = enic_get_tunable,
498 .set_tunable = enic_set_tunable,
499 .get_rxfh_key_size = enic_get_rxfh_key_size,
500 .get_rxfh = enic_get_rxfh,
501 .set_rxfh = enic_set_rxfh,
504 void enic_set_ethtool_ops(struct net_device *netdev)
506 netdev->ethtool_ops = &enic_ethtool_ops;