1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/core/ethtool.c - Ethtool ioctl handler
4 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
6 * This file is where we call all the ethtool_ops commands to get
7 * the information ethtool needs.
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/capability.h>
13 #include <linux/errno.h>
14 #include <linux/ethtool.h>
15 #include <linux/netdevice.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/phy.h>
18 #include <linux/bitops.h>
19 #include <linux/uaccess.h>
20 #include <linux/vmalloc.h>
21 #include <linux/sfp.h>
22 #include <linux/slab.h>
23 #include <linux/rtnetlink.h>
24 #include <linux/sched/signal.h>
25 #include <linux/net.h>
26 #include <net/devlink.h>
27 #include <net/xdp_sock.h>
28 #include <net/flow_offload.h>
31 * Some useful ethtool_ops methods that're device independent.
32 * If we find that all drivers want to do the same thing here,
33 * we can turn these into dev_() function calls.
36 u32 ethtool_op_get_link(struct net_device *dev)
38 return netif_carrier_ok(dev) ? 1 : 0;
40 EXPORT_SYMBOL(ethtool_op_get_link);
42 int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
44 info->so_timestamping =
45 SOF_TIMESTAMPING_TX_SOFTWARE |
46 SOF_TIMESTAMPING_RX_SOFTWARE |
47 SOF_TIMESTAMPING_SOFTWARE;
51 EXPORT_SYMBOL(ethtool_op_get_ts_info);
53 /* Handlers for each ethtool command */
55 #define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
57 static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
58 [NETIF_F_SG_BIT] = "tx-scatter-gather",
59 [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4",
60 [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic",
61 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
62 [NETIF_F_HIGHDMA_BIT] = "highdma",
63 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
64 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
66 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse",
67 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
68 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert",
69 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse",
70 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
71 [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged",
72 [NETIF_F_GSO_BIT] = "tx-generic-segmentation",
73 [NETIF_F_LLTX_BIT] = "tx-lockless",
74 [NETIF_F_NETNS_LOCAL_BIT] = "netns-local",
75 [NETIF_F_GRO_BIT] = "rx-gro",
76 [NETIF_F_GRO_HW_BIT] = "rx-gro-hw",
77 [NETIF_F_LRO_BIT] = "rx-lro",
79 [NETIF_F_TSO_BIT] = "tx-tcp-segmentation",
80 [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust",
81 [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation",
82 [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation",
83 [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
84 [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
85 [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation",
86 [NETIF_F_GSO_GRE_CSUM_BIT] = "tx-gre-csum-segmentation",
87 [NETIF_F_GSO_IPXIP4_BIT] = "tx-ipxip4-segmentation",
88 [NETIF_F_GSO_IPXIP6_BIT] = "tx-ipxip6-segmentation",
89 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
90 [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
91 [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
92 [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
93 [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation",
94 [NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation",
96 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
97 [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
98 [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu",
99 [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter",
100 [NETIF_F_RXHASH_BIT] = "rx-hashing",
101 [NETIF_F_RXCSUM_BIT] = "rx-checksum",
102 [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy",
103 [NETIF_F_LOOPBACK_BIT] = "loopback",
104 [NETIF_F_RXFCS_BIT] = "rx-fcs",
105 [NETIF_F_RXALL_BIT] = "rx-all",
106 [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
107 [NETIF_F_HW_TC_BIT] = "hw-tc-offload",
108 [NETIF_F_HW_ESP_BIT] = "esp-hw-offload",
109 [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload",
110 [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload",
111 [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record",
112 [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
113 [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload",
117 rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
118 [ETH_RSS_HASH_TOP_BIT] = "toeplitz",
119 [ETH_RSS_HASH_XOR_BIT] = "xor",
120 [ETH_RSS_HASH_CRC32_BIT] = "crc32",
124 tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
125 [ETHTOOL_ID_UNSPEC] = "Unspec",
126 [ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
127 [ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
128 [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout",
132 phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
133 [ETHTOOL_ID_UNSPEC] = "Unspec",
134 [ETHTOOL_PHY_DOWNSHIFT] = "phy-downshift",
135 [ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down",
138 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
140 struct ethtool_gfeatures cmd = {
141 .cmd = ETHTOOL_GFEATURES,
142 .size = ETHTOOL_DEV_FEATURE_WORDS,
144 struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
145 u32 __user *sizeaddr;
149 /* in case feature bits run out again */
150 BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
152 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
153 features[i].available = (u32)(dev->hw_features >> (32 * i));
154 features[i].requested = (u32)(dev->wanted_features >> (32 * i));
155 features[i].active = (u32)(dev->features >> (32 * i));
156 features[i].never_changed =
157 (u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
160 sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
161 if (get_user(copy_size, sizeaddr))
164 if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
165 copy_size = ETHTOOL_DEV_FEATURE_WORDS;
167 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
169 useraddr += sizeof(cmd);
170 if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
176 static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
178 struct ethtool_sfeatures cmd;
179 struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
180 netdev_features_t wanted = 0, valid = 0;
183 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
185 useraddr += sizeof(cmd);
187 if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
190 if (copy_from_user(features, useraddr, sizeof(features)))
193 for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
194 valid |= (netdev_features_t)features[i].valid << (32 * i);
195 wanted |= (netdev_features_t)features[i].requested << (32 * i);
198 if (valid & ~NETIF_F_ETHTOOL_BITS)
201 if (valid & ~dev->hw_features) {
202 valid &= dev->hw_features;
203 ret |= ETHTOOL_F_UNSUPPORTED;
206 dev->wanted_features &= ~valid;
207 dev->wanted_features |= wanted & valid;
208 __netdev_update_features(dev);
210 if ((dev->wanted_features ^ dev->features) & valid)
211 ret |= ETHTOOL_F_WISH;
216 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
218 const struct ethtool_ops *ops = dev->ethtool_ops;
220 if (sset == ETH_SS_FEATURES)
221 return ARRAY_SIZE(netdev_features_strings);
223 if (sset == ETH_SS_RSS_HASH_FUNCS)
224 return ARRAY_SIZE(rss_hash_func_strings);
226 if (sset == ETH_SS_TUNABLES)
227 return ARRAY_SIZE(tunable_strings);
229 if (sset == ETH_SS_PHY_TUNABLES)
230 return ARRAY_SIZE(phy_tunable_strings);
232 if (sset == ETH_SS_PHY_STATS && dev->phydev &&
233 !ops->get_ethtool_phy_stats)
234 return phy_ethtool_get_sset_count(dev->phydev);
236 if (ops->get_sset_count && ops->get_strings)
237 return ops->get_sset_count(dev, sset);
242 static void __ethtool_get_strings(struct net_device *dev,
243 u32 stringset, u8 *data)
245 const struct ethtool_ops *ops = dev->ethtool_ops;
247 if (stringset == ETH_SS_FEATURES)
248 memcpy(data, netdev_features_strings,
249 sizeof(netdev_features_strings));
250 else if (stringset == ETH_SS_RSS_HASH_FUNCS)
251 memcpy(data, rss_hash_func_strings,
252 sizeof(rss_hash_func_strings));
253 else if (stringset == ETH_SS_TUNABLES)
254 memcpy(data, tunable_strings, sizeof(tunable_strings));
255 else if (stringset == ETH_SS_PHY_TUNABLES)
256 memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings));
257 else if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
258 !ops->get_ethtool_phy_stats)
259 phy_ethtool_get_strings(dev->phydev, data);
261 /* ops->get_strings is valid because checked earlier */
262 ops->get_strings(dev, stringset, data);
265 static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
267 /* feature masks of legacy discrete ethtool ops */
270 case ETHTOOL_GTXCSUM:
271 case ETHTOOL_STXCSUM:
272 return NETIF_F_CSUM_MASK | NETIF_F_SCTP_CRC;
273 case ETHTOOL_GRXCSUM:
274 case ETHTOOL_SRXCSUM:
275 return NETIF_F_RXCSUM;
281 return NETIF_F_ALL_TSO;
293 static int ethtool_get_one_feature(struct net_device *dev,
294 char __user *useraddr, u32 ethcmd)
296 netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
297 struct ethtool_value edata = {
299 .data = !!(dev->features & mask),
302 if (copy_to_user(useraddr, &edata, sizeof(edata)))
307 static int ethtool_set_one_feature(struct net_device *dev,
308 void __user *useraddr, u32 ethcmd)
310 struct ethtool_value edata;
311 netdev_features_t mask;
313 if (copy_from_user(&edata, useraddr, sizeof(edata)))
316 mask = ethtool_get_feature_mask(ethcmd);
317 mask &= dev->hw_features;
322 dev->wanted_features |= mask;
324 dev->wanted_features &= ~mask;
326 __netdev_update_features(dev);
331 #define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
332 ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
333 #define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX | \
334 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_NTUPLE | \
337 static u32 __ethtool_get_flags(struct net_device *dev)
341 if (dev->features & NETIF_F_LRO)
342 flags |= ETH_FLAG_LRO;
343 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
344 flags |= ETH_FLAG_RXVLAN;
345 if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
346 flags |= ETH_FLAG_TXVLAN;
347 if (dev->features & NETIF_F_NTUPLE)
348 flags |= ETH_FLAG_NTUPLE;
349 if (dev->features & NETIF_F_RXHASH)
350 flags |= ETH_FLAG_RXHASH;
355 static int __ethtool_set_flags(struct net_device *dev, u32 data)
357 netdev_features_t features = 0, changed;
359 if (data & ~ETH_ALL_FLAGS)
362 if (data & ETH_FLAG_LRO)
363 features |= NETIF_F_LRO;
364 if (data & ETH_FLAG_RXVLAN)
365 features |= NETIF_F_HW_VLAN_CTAG_RX;
366 if (data & ETH_FLAG_TXVLAN)
367 features |= NETIF_F_HW_VLAN_CTAG_TX;
368 if (data & ETH_FLAG_NTUPLE)
369 features |= NETIF_F_NTUPLE;
370 if (data & ETH_FLAG_RXHASH)
371 features |= NETIF_F_RXHASH;
373 /* allow changing only bits set in hw_features */
374 changed = (features ^ dev->features) & ETH_ALL_FEATURES;
375 if (changed & ~dev->hw_features)
376 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
378 dev->wanted_features =
379 (dev->wanted_features & ~changed) | (features & changed);
381 __netdev_update_features(dev);
386 /* Given two link masks, AND them together and save the result in dst. */
387 void ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst,
388 struct ethtool_link_ksettings *src)
390 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
391 unsigned int idx = 0;
393 for (; idx < size; idx++) {
394 dst->link_modes.supported[idx] &=
395 src->link_modes.supported[idx];
396 dst->link_modes.advertising[idx] &=
397 src->link_modes.advertising[idx];
400 EXPORT_SYMBOL(ethtool_intersect_link_masks);
402 void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
405 bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
408 EXPORT_SYMBOL(ethtool_convert_legacy_u32_to_link_mode);
410 /* return false if src had higher bits set. lower bits always updated. */
411 bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
412 const unsigned long *src)
416 /* TODO: following test will soon always be true */
417 if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) {
418 __ETHTOOL_DECLARE_LINK_MODE_MASK(ext);
420 bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
421 bitmap_fill(ext, 32);
422 bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS);
423 if (bitmap_intersects(ext, src,
424 __ETHTOOL_LINK_MODE_MASK_NBITS)) {
425 /* src mask goes beyond bit 31 */
429 *legacy_u32 = src[0];
432 EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
434 /* return false if legacy contained non-0 deprecated fields
435 * maxtxpkt/maxrxpkt. rest of ksettings always updated
438 convert_legacy_settings_to_link_ksettings(
439 struct ethtool_link_ksettings *link_ksettings,
440 const struct ethtool_cmd *legacy_settings)
444 memset(link_ksettings, 0, sizeof(*link_ksettings));
446 /* This is used to tell users that driver is still using these
447 * deprecated legacy fields, and they should not use
448 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
450 if (legacy_settings->maxtxpkt ||
451 legacy_settings->maxrxpkt)
454 ethtool_convert_legacy_u32_to_link_mode(
455 link_ksettings->link_modes.supported,
456 legacy_settings->supported);
457 ethtool_convert_legacy_u32_to_link_mode(
458 link_ksettings->link_modes.advertising,
459 legacy_settings->advertising);
460 ethtool_convert_legacy_u32_to_link_mode(
461 link_ksettings->link_modes.lp_advertising,
462 legacy_settings->lp_advertising);
463 link_ksettings->base.speed
464 = ethtool_cmd_speed(legacy_settings);
465 link_ksettings->base.duplex
466 = legacy_settings->duplex;
467 link_ksettings->base.port
468 = legacy_settings->port;
469 link_ksettings->base.phy_address
470 = legacy_settings->phy_address;
471 link_ksettings->base.autoneg
472 = legacy_settings->autoneg;
473 link_ksettings->base.mdio_support
474 = legacy_settings->mdio_support;
475 link_ksettings->base.eth_tp_mdix
476 = legacy_settings->eth_tp_mdix;
477 link_ksettings->base.eth_tp_mdix_ctrl
478 = legacy_settings->eth_tp_mdix_ctrl;
482 /* return false if ksettings link modes had higher bits
483 * set. legacy_settings always updated (best effort)
486 convert_link_ksettings_to_legacy_settings(
487 struct ethtool_cmd *legacy_settings,
488 const struct ethtool_link_ksettings *link_ksettings)
492 memset(legacy_settings, 0, sizeof(*legacy_settings));
493 /* this also clears the deprecated fields in legacy structure:
499 retval &= ethtool_convert_link_mode_to_legacy_u32(
500 &legacy_settings->supported,
501 link_ksettings->link_modes.supported);
502 retval &= ethtool_convert_link_mode_to_legacy_u32(
503 &legacy_settings->advertising,
504 link_ksettings->link_modes.advertising);
505 retval &= ethtool_convert_link_mode_to_legacy_u32(
506 &legacy_settings->lp_advertising,
507 link_ksettings->link_modes.lp_advertising);
508 ethtool_cmd_speed_set(legacy_settings, link_ksettings->base.speed);
509 legacy_settings->duplex
510 = link_ksettings->base.duplex;
511 legacy_settings->port
512 = link_ksettings->base.port;
513 legacy_settings->phy_address
514 = link_ksettings->base.phy_address;
515 legacy_settings->autoneg
516 = link_ksettings->base.autoneg;
517 legacy_settings->mdio_support
518 = link_ksettings->base.mdio_support;
519 legacy_settings->eth_tp_mdix
520 = link_ksettings->base.eth_tp_mdix;
521 legacy_settings->eth_tp_mdix_ctrl
522 = link_ksettings->base.eth_tp_mdix_ctrl;
523 legacy_settings->transceiver
524 = link_ksettings->base.transceiver;
528 /* number of 32-bit words to store the user's link mode bitmaps */
529 #define __ETHTOOL_LINK_MODE_MASK_NU32 \
530 DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
532 /* layout of the struct passed from/to userland */
533 struct ethtool_link_usettings {
534 struct ethtool_link_settings base;
536 __u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32];
537 __u32 advertising[__ETHTOOL_LINK_MODE_MASK_NU32];
538 __u32 lp_advertising[__ETHTOOL_LINK_MODE_MASK_NU32];
542 /* Internal kernel helper to query a device ethtool_link_settings. */
543 int __ethtool_get_link_ksettings(struct net_device *dev,
544 struct ethtool_link_ksettings *link_ksettings)
548 if (!dev->ethtool_ops->get_link_ksettings)
551 memset(link_ksettings, 0, sizeof(*link_ksettings));
552 return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
554 EXPORT_SYMBOL(__ethtool_get_link_ksettings);
556 /* convert ethtool_link_usettings in user space to a kernel internal
557 * ethtool_link_ksettings. return 0 on success, errno on error.
559 static int load_link_ksettings_from_user(struct ethtool_link_ksettings *to,
560 const void __user *from)
562 struct ethtool_link_usettings link_usettings;
564 if (copy_from_user(&link_usettings, from, sizeof(link_usettings)))
567 memcpy(&to->base, &link_usettings.base, sizeof(to->base));
568 bitmap_from_arr32(to->link_modes.supported,
569 link_usettings.link_modes.supported,
570 __ETHTOOL_LINK_MODE_MASK_NBITS);
571 bitmap_from_arr32(to->link_modes.advertising,
572 link_usettings.link_modes.advertising,
573 __ETHTOOL_LINK_MODE_MASK_NBITS);
574 bitmap_from_arr32(to->link_modes.lp_advertising,
575 link_usettings.link_modes.lp_advertising,
576 __ETHTOOL_LINK_MODE_MASK_NBITS);
581 /* convert a kernel internal ethtool_link_ksettings to
582 * ethtool_link_usettings in user space. return 0 on success, errno on
586 store_link_ksettings_for_user(void __user *to,
587 const struct ethtool_link_ksettings *from)
589 struct ethtool_link_usettings link_usettings;
591 memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
592 bitmap_to_arr32(link_usettings.link_modes.supported,
593 from->link_modes.supported,
594 __ETHTOOL_LINK_MODE_MASK_NBITS);
595 bitmap_to_arr32(link_usettings.link_modes.advertising,
596 from->link_modes.advertising,
597 __ETHTOOL_LINK_MODE_MASK_NBITS);
598 bitmap_to_arr32(link_usettings.link_modes.lp_advertising,
599 from->link_modes.lp_advertising,
600 __ETHTOOL_LINK_MODE_MASK_NBITS);
602 if (copy_to_user(to, &link_usettings, sizeof(link_usettings)))
608 /* Query device for its ethtool_link_settings. */
609 static int ethtool_get_link_ksettings(struct net_device *dev,
610 void __user *useraddr)
613 struct ethtool_link_ksettings link_ksettings;
616 if (!dev->ethtool_ops->get_link_ksettings)
619 /* handle bitmap nbits handshake */
620 if (copy_from_user(&link_ksettings.base, useraddr,
621 sizeof(link_ksettings.base)))
624 if (__ETHTOOL_LINK_MODE_MASK_NU32
625 != link_ksettings.base.link_mode_masks_nwords) {
626 /* wrong link mode nbits requested */
627 memset(&link_ksettings, 0, sizeof(link_ksettings));
628 link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
629 /* send back number of words required as negative val */
630 compiletime_assert(__ETHTOOL_LINK_MODE_MASK_NU32 <= S8_MAX,
631 "need too many bits for link modes!");
632 link_ksettings.base.link_mode_masks_nwords
633 = -((s8)__ETHTOOL_LINK_MODE_MASK_NU32);
635 /* copy the base fields back to user, not the link
638 if (copy_to_user(useraddr, &link_ksettings.base,
639 sizeof(link_ksettings.base)))
645 /* handshake successful: user/kernel agree on
646 * link_mode_masks_nwords
649 memset(&link_ksettings, 0, sizeof(link_ksettings));
650 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
654 /* make sure we tell the right values to user */
655 link_ksettings.base.cmd = ETHTOOL_GLINKSETTINGS;
656 link_ksettings.base.link_mode_masks_nwords
657 = __ETHTOOL_LINK_MODE_MASK_NU32;
659 return store_link_ksettings_for_user(useraddr, &link_ksettings);
662 /* Update device ethtool_link_settings. */
663 static int ethtool_set_link_ksettings(struct net_device *dev,
664 void __user *useraddr)
667 struct ethtool_link_ksettings link_ksettings;
671 if (!dev->ethtool_ops->set_link_ksettings)
674 /* make sure nbits field has expected value */
675 if (copy_from_user(&link_ksettings.base, useraddr,
676 sizeof(link_ksettings.base)))
679 if (__ETHTOOL_LINK_MODE_MASK_NU32
680 != link_ksettings.base.link_mode_masks_nwords)
683 /* copy the whole structure, now that we know it has expected
686 err = load_link_ksettings_from_user(&link_ksettings, useraddr);
690 /* re-check nwords field, just in case */
691 if (__ETHTOOL_LINK_MODE_MASK_NU32
692 != link_ksettings.base.link_mode_masks_nwords)
695 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
698 /* Query device for its ethtool_cmd settings.
700 * Backward compatibility note: for compatibility with legacy ethtool, this is
701 * now implemented via get_link_ksettings. When driver reports higher link mode
702 * bits, a kernel warning is logged once (with name of 1st driver/device) to
703 * recommend user to upgrade ethtool, but the command is successful (only the
704 * lower link mode bits reported back to user). Deprecated fields from
705 * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero.
707 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
709 struct ethtool_link_ksettings link_ksettings;
710 struct ethtool_cmd cmd;
714 if (!dev->ethtool_ops->get_link_ksettings)
717 memset(&link_ksettings, 0, sizeof(link_ksettings));
718 err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings);
721 convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);
723 /* send a sensible cmd tag back to user */
724 cmd.cmd = ETHTOOL_GSET;
726 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
732 /* Update device link settings with given ethtool_cmd.
734 * Backward compatibility note: for compatibility with legacy ethtool, this is
735 * now always implemented via set_link_settings. When user's request updates
736 * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel
737 * warning is logged once (with name of 1st driver/device) to recommend user to
738 * upgrade ethtool, and the request is rejected.
740 static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
742 struct ethtool_link_ksettings link_ksettings;
743 struct ethtool_cmd cmd;
747 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
749 if (!dev->ethtool_ops->set_link_ksettings)
752 if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd))
754 link_ksettings.base.link_mode_masks_nwords =
755 __ETHTOOL_LINK_MODE_MASK_NU32;
756 return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
759 static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
760 void __user *useraddr)
762 struct ethtool_drvinfo info;
763 const struct ethtool_ops *ops = dev->ethtool_ops;
765 memset(&info, 0, sizeof(info));
766 info.cmd = ETHTOOL_GDRVINFO;
767 if (ops->get_drvinfo) {
768 ops->get_drvinfo(dev, &info);
769 } else if (dev->dev.parent && dev->dev.parent->driver) {
770 strlcpy(info.bus_info, dev_name(dev->dev.parent),
771 sizeof(info.bus_info));
772 strlcpy(info.driver, dev->dev.parent->driver->name,
773 sizeof(info.driver));
779 * this method of obtaining string set info is deprecated;
780 * Use ETHTOOL_GSSET_INFO instead.
782 if (ops->get_sset_count) {
785 rc = ops->get_sset_count(dev, ETH_SS_TEST);
787 info.testinfo_len = rc;
788 rc = ops->get_sset_count(dev, ETH_SS_STATS);
791 rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
793 info.n_priv_flags = rc;
795 if (ops->get_regs_len) {
796 int ret = ops->get_regs_len(dev);
799 info.regdump_len = ret;
802 if (ops->get_eeprom_len)
803 info.eedump_len = ops->get_eeprom_len(dev);
805 if (!info.fw_version[0])
806 devlink_compat_running_version(dev, info.fw_version,
807 sizeof(info.fw_version));
809 if (copy_to_user(useraddr, &info, sizeof(info)))
814 static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
815 void __user *useraddr)
817 struct ethtool_sset_info info;
819 int i, idx = 0, n_bits = 0, ret, rc;
820 u32 *info_buf = NULL;
822 if (copy_from_user(&info, useraddr, sizeof(info)))
825 /* store copy of mask, because we zero struct later on */
826 sset_mask = info.sset_mask;
830 /* calculate size of return buffer */
831 n_bits = hweight64(sset_mask);
833 memset(&info, 0, sizeof(info));
834 info.cmd = ETHTOOL_GSSET_INFO;
836 info_buf = kcalloc(n_bits, sizeof(u32), GFP_USER);
841 * fill return buffer based on input bitmask and successful
842 * get_sset_count return
844 for (i = 0; i < 64; i++) {
845 if (!(sset_mask & (1ULL << i)))
848 rc = __ethtool_get_sset_count(dev, i);
850 info.sset_mask |= (1ULL << i);
851 info_buf[idx++] = rc;
856 if (copy_to_user(useraddr, &info, sizeof(info)))
859 useraddr += offsetof(struct ethtool_sset_info, data);
860 if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
870 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
871 u32 cmd, void __user *useraddr)
873 struct ethtool_rxnfc info;
874 size_t info_size = sizeof(info);
877 if (!dev->ethtool_ops->set_rxnfc)
880 /* struct ethtool_rxnfc was originally defined for
881 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
882 * members. User-space might still be using that
884 if (cmd == ETHTOOL_SRXFH)
885 info_size = (offsetof(struct ethtool_rxnfc, data) +
888 if (copy_from_user(&info, useraddr, info_size))
891 rc = dev->ethtool_ops->set_rxnfc(dev, &info);
895 if (cmd == ETHTOOL_SRXCLSRLINS &&
896 copy_to_user(useraddr, &info, info_size))
902 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
903 u32 cmd, void __user *useraddr)
905 struct ethtool_rxnfc info;
906 size_t info_size = sizeof(info);
907 const struct ethtool_ops *ops = dev->ethtool_ops;
909 void *rule_buf = NULL;
914 /* struct ethtool_rxnfc was originally defined for
915 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
916 * members. User-space might still be using that
918 if (cmd == ETHTOOL_GRXFH)
919 info_size = (offsetof(struct ethtool_rxnfc, data) +
922 if (copy_from_user(&info, useraddr, info_size))
925 /* If FLOW_RSS was requested then user-space must be using the
926 * new definition, as FLOW_RSS is newer.
928 if (cmd == ETHTOOL_GRXFH && info.flow_type & FLOW_RSS) {
929 info_size = sizeof(info);
930 if (copy_from_user(&info, useraddr, info_size))
932 /* Since malicious users may modify the original data,
933 * we need to check whether FLOW_RSS is still requested.
935 if (!(info.flow_type & FLOW_RSS))
942 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
943 if (info.rule_cnt > 0) {
944 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
945 rule_buf = kcalloc(info.rule_cnt, sizeof(u32),
952 ret = ops->get_rxnfc(dev, &info, rule_buf);
957 if (copy_to_user(useraddr, &info, info_size))
961 useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
962 if (copy_to_user(useraddr, rule_buf,
963 info.rule_cnt * sizeof(u32)))
974 static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
975 struct ethtool_rxnfc *rx_rings,
980 if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
983 /* Validate ring indices */
984 for (i = 0; i < size; i++)
985 if (indir[i] >= rx_rings->data)
991 u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
993 void netdev_rss_key_fill(void *buffer, size_t len)
995 BUG_ON(len > sizeof(netdev_rss_key));
996 net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key));
997 memcpy(buffer, netdev_rss_key, len);
999 EXPORT_SYMBOL(netdev_rss_key_fill);
1001 static int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
1003 u32 dev_size, current_max = 0;
1007 if (!dev->ethtool_ops->get_rxfh_indir_size ||
1008 !dev->ethtool_ops->get_rxfh)
1010 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
1014 indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
1018 ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
1023 current_max = max(current_max, indir[dev_size]);
1032 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
1033 void __user *useraddr)
1035 u32 user_size, dev_size;
1039 if (!dev->ethtool_ops->get_rxfh_indir_size ||
1040 !dev->ethtool_ops->get_rxfh)
1042 dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
1046 if (copy_from_user(&user_size,
1047 useraddr + offsetof(struct ethtool_rxfh_indir, size),
1051 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh_indir, size),
1052 &dev_size, sizeof(dev_size)))
1055 /* If the user buffer size is 0, this is just a query for the
1056 * device table size. Otherwise, if it's smaller than the
1057 * device table size it's an error.
1059 if (user_size < dev_size)
1060 return user_size == 0 ? 0 : -EINVAL;
1062 indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
1066 ret = dev->ethtool_ops->get_rxfh(dev, indir, NULL, NULL);
1070 if (copy_to_user(useraddr +
1071 offsetof(struct ethtool_rxfh_indir, ring_index[0]),
1072 indir, dev_size * sizeof(indir[0])))
1080 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
1081 void __user *useraddr)
1083 struct ethtool_rxnfc rx_rings;
1084 u32 user_size, dev_size, i;
1086 const struct ethtool_ops *ops = dev->ethtool_ops;
1088 u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
1090 if (!ops->get_rxfh_indir_size || !ops->set_rxfh ||
1094 dev_size = ops->get_rxfh_indir_size(dev);
1098 if (copy_from_user(&user_size,
1099 useraddr + offsetof(struct ethtool_rxfh_indir, size),
1103 if (user_size != 0 && user_size != dev_size)
1106 indir = kcalloc(dev_size, sizeof(indir[0]), GFP_USER);
1110 rx_rings.cmd = ETHTOOL_GRXRINGS;
1111 ret = ops->get_rxnfc(dev, &rx_rings, NULL);
1115 if (user_size == 0) {
1116 for (i = 0; i < dev_size; i++)
1117 indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
1119 ret = ethtool_copy_validate_indir(indir,
1120 useraddr + ringidx_offset,
1127 ret = ops->set_rxfh(dev, indir, NULL, ETH_RSS_HASH_NO_CHANGE);
1131 /* indicate whether rxfh was set to default */
1133 dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
1135 dev->priv_flags |= IFF_RXFH_CONFIGURED;
1142 static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
1143 void __user *useraddr)
1146 const struct ethtool_ops *ops = dev->ethtool_ops;
1147 u32 user_indir_size, user_key_size;
1148 u32 dev_indir_size = 0, dev_key_size = 0;
1149 struct ethtool_rxfh rxfh;
1160 if (ops->get_rxfh_indir_size)
1161 dev_indir_size = ops->get_rxfh_indir_size(dev);
1162 if (ops->get_rxfh_key_size)
1163 dev_key_size = ops->get_rxfh_key_size(dev);
1165 if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
1167 user_indir_size = rxfh.indir_size;
1168 user_key_size = rxfh.key_size;
1170 /* Check that reserved fields are 0 for now */
1171 if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32)
1173 /* Most drivers don't handle rss_context, check it's 0 as well */
1174 if (rxfh.rss_context && !ops->get_rxfh_context)
1177 rxfh.indir_size = dev_indir_size;
1178 rxfh.key_size = dev_key_size;
1179 if (copy_to_user(useraddr, &rxfh, sizeof(rxfh)))
1182 if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
1183 (user_key_size && (user_key_size != dev_key_size)))
1186 indir_bytes = user_indir_size * sizeof(indir[0]);
1187 total_size = indir_bytes + user_key_size;
1188 rss_config = kzalloc(total_size, GFP_USER);
1192 if (user_indir_size)
1193 indir = (u32 *)rss_config;
1196 hkey = rss_config + indir_bytes;
1198 if (rxfh.rss_context)
1199 ret = dev->ethtool_ops->get_rxfh_context(dev, indir, hkey,
1203 ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey, &dev_hfunc);
1207 if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc),
1208 &dev_hfunc, sizeof(rxfh.hfunc))) {
1210 } else if (copy_to_user(useraddr +
1211 offsetof(struct ethtool_rxfh, rss_config[0]),
1212 rss_config, total_size)) {
/* ETHTOOL_SRSSH handler: set the RX flow hash indirection table, the hash
 * key and/or the hash function, optionally for an additional RSS context
 * (rxfh.rss_context != 0, which requires ops->set_rxfh_context).
 * NOTE(review): several error-return statements and closing braces appear
 * to be missing from this extracted view; the visible checks follow the
 * usual -EOPNOTSUPP / -EFAULT / -EINVAL kernel pattern.
 */
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
					       void __user *useraddr)
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct ethtool_rxnfc rx_rings;
	struct ethtool_rxfh rxfh;
	u32 dev_indir_size = 0, dev_key_size = 0, i;
	u32 *indir = NULL, indir_bytes = 0;
	u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
	bool delete = false;
	/* Driver must support both querying ring count and setting RSS. */
	if (!ops->get_rxnfc || !ops->set_rxfh)
	if (ops->get_rxfh_indir_size)
		dev_indir_size = ops->get_rxfh_indir_size(dev);
	if (ops->get_rxfh_key_size)
		dev_key_size = ops->get_rxfh_key_size(dev);
	if (copy_from_user(&rxfh, useraddr, sizeof(rxfh)))
	/* Check that reserved fields are 0 for now */
	if (rxfh.rsvd8[0] || rxfh.rsvd8[1] || rxfh.rsvd8[2] || rxfh.rsvd32)
	/* Most drivers don't handle rss_context, check it's 0 as well */
	if (rxfh.rss_context && !ops->set_rxfh_context)
	/* If either indir, hash key or function is valid, proceed further.
	 * Must request at least one change: indir size, hash key or function.
	 */
	if ((rxfh.indir_size &&
	     rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE &&
	     rxfh.indir_size != dev_indir_size) ||
	    (rxfh.key_size && (rxfh.key_size != dev_key_size)) ||
	    (rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE &&
	     rxfh.key_size == 0 && rxfh.hfunc == ETH_RSS_HASH_NO_CHANGE))
	if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
		indir_bytes = dev_indir_size * sizeof(indir[0]);
	/* Single buffer holds the indirection table followed by the key. */
	rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
	/* Ring count is needed to validate indirection-table entries. */
	rx_rings.cmd = ETHTOOL_GRXRINGS;
	ret = ops->get_rxnfc(dev, &rx_rings, NULL);
	/* rxfh.indir_size == 0 means reset the indir table to default (master
	 * context) or delete the context (other RSS contexts).
	 * rxfh.indir_size == ETH_RXFH_INDIR_NO_CHANGE means leave it unchanged.
	 */
	if (rxfh.indir_size &&
	    rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
		indir = (u32 *)rss_config;
		ret = ethtool_copy_validate_indir(indir,
						  useraddr + rss_cfg_offset,
	} else if (rxfh.indir_size == 0) {
		if (rxfh.rss_context == 0) {
			/* Main context: rebuild the default spread over rings. */
			indir = (u32 *)rss_config;
			for (i = 0; i < dev_indir_size; i++)
				indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
	if (rxfh.key_size) {
		/* Key bytes follow the indirection table in user memory. */
		hkey = rss_config + indir_bytes;
		if (copy_from_user(hkey,
				   useraddr + rss_cfg_offset + indir_bytes,
	if (rxfh.rss_context)
		ret = ops->set_rxfh_context(dev, indir, hkey, rxfh.hfunc,
					    &rxfh.rss_context, delete);
		ret = ops->set_rxfh(dev, indir, hkey, rxfh.hfunc);
	/* Report the (possibly newly allocated) context ID back to userspace. */
	if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
			 &rxfh.rss_context, sizeof(rxfh.rss_context)))
	if (!rxfh.rss_context) {
		/* indicate whether rxfh was set to default */
		if (rxfh.indir_size == 0)
			dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
		else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
			dev->priv_flags |= IFF_RXFH_CONFIGURED;
1334 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1336 struct ethtool_regs regs;
1337 const struct ethtool_ops *ops = dev->ethtool_ops;
1341 if (!ops->get_regs || !ops->get_regs_len)
1344 if (copy_from_user(®s, useraddr, sizeof(regs)))
1347 reglen = ops->get_regs_len(dev);
1351 if (regs.len > reglen)
1354 regbuf = vzalloc(reglen);
1358 ops->get_regs(dev, ®s, regbuf);
1361 if (copy_to_user(useraddr, ®s, sizeof(regs)))
1363 useraddr += offsetof(struct ethtool_regs, data);
1364 if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
/* ETHTOOL_RESET handler: pass the userspace reset-flags word to the driver;
 * the driver clears the flags it acted on, and the updated word is copied
 * back so userspace can see which resets were performed.
 */
static int ethtool_reset(struct net_device *dev, char __user *useraddr)
	struct ethtool_value reset;
	if (!dev->ethtool_ops->reset)
	if (copy_from_user(&reset, useraddr, sizeof(reset)))
	ret = dev->ethtool_ops->reset(dev, &reset.data);
	if (copy_to_user(useraddr, &reset, sizeof(reset)))
/* ETHTOOL_GWOL handler: report the device's Wake-on-LAN settings. */
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	if (!dev->ethtool_ops->get_wol)
	dev->ethtool_ops->get_wol(dev, &wol);
	if (copy_to_user(useraddr, &wol, sizeof(wol)))
/* ETHTOOL_SWOL handler: configure Wake-on-LAN and mirror the result into
 * dev->wol_enabled so the rest of the stack (e.g. suspend paths) can see it.
 */
static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
	struct ethtool_wolinfo wol;
	if (!dev->ethtool_ops->set_wol)
	if (copy_from_user(&wol, useraddr, sizeof(wol)))
	ret = dev->ethtool_ops->set_wol(dev, &wol);
	/* Cache whether any WoL mode is active on the netdev itself. */
	dev->wol_enabled = !!wol.wolopts;
/* ETHTOOL_GEEE handler: query Energy-Efficient Ethernet settings. */
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
	struct ethtool_eee edata;
	if (!dev->ethtool_ops->get_eee)
	memset(&edata, 0, sizeof(struct ethtool_eee));
	edata.cmd = ETHTOOL_GEEE;
	rc = dev->ethtool_ops->get_eee(dev, &edata);
	if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* ETHTOOL_SEEE handler: apply Energy-Efficient Ethernet settings. */
static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
	struct ethtool_eee edata;
	if (!dev->ethtool_ops->set_eee)
	if (copy_from_user(&edata, useraddr, sizeof(edata)))
	return dev->ethtool_ops->set_eee(dev, &edata);
/* ETHTOOL_NWAY_RST handler: restart link autonegotiation. */
static int ethtool_nway_reset(struct net_device *dev)
	if (!dev->ethtool_ops->nway_reset)
	return dev->ethtool_ops->nway_reset(dev);
/* ETHTOOL_GLINK handler: report link state. A down interface is always
 * reported as no-link regardless of what the driver says.
 */
static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
	struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
	if (!dev->ethtool_ops->get_link)
	edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev);
	if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Shared EEPROM-read helper used for both device EEPROMs and pluggable
 * module EEPROMs: validates the (offset, len) window against @total_len,
 * then reads through @getter in PAGE_SIZE chunks, copying each chunk to
 * userspace. Finally writes back the header with the actual length read.
 */
static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
				  int (*getter)(struct net_device *,
						struct ethtool_eeprom *, u8 *),
	struct ethtool_eeprom eeprom;
	void __user *userbuf = useraddr + sizeof(eeprom);
	u32 bytes_remaining;
	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
	/* Check for wrap and zero */
	if (eeprom.offset + eeprom.len <= eeprom.offset)
	/* Check for exceeding total eeprom len */
	if (eeprom.offset + eeprom.len > total_len)
	/* One bounce buffer, reused for every chunk. */
	data = kmalloc(PAGE_SIZE, GFP_USER);
	bytes_remaining = eeprom.len;
	while (bytes_remaining > 0) {
		eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
		ret = getter(dev, &eeprom, data);
		if (copy_to_user(userbuf, data, eeprom.len)) {
		userbuf += eeprom.len;
		eeprom.offset += eeprom.len;
		bytes_remaining -= eeprom.len;
	/* Recompute total bytes actually transferred for the header copy-back. */
	eeprom.len = userbuf - (useraddr + sizeof(eeprom));
	eeprom.offset -= eeprom.len;
	if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
/* ETHTOOL_GEEPROM handler: read the device EEPROM via the generic chunked
 * helper, bounded by the driver-reported EEPROM length.
 */
static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
	const struct ethtool_ops *ops = dev->ethtool_ops;
	/* Reject devices with no EEPROM or a zero-length one. */
	if (!ops->get_eeprom || !ops->get_eeprom_len ||
	    !ops->get_eeprom_len(dev))
	return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
				      ops->get_eeprom_len(dev));
/* ETHTOOL_SEEPROM handler: write the device EEPROM in PAGE_SIZE chunks,
 * after validating that the (offset, len) window neither wraps nor exceeds
 * the driver-reported EEPROM size.
 */
static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	void __user *userbuf = useraddr + sizeof(eeprom);
	u32 bytes_remaining;
	if (!ops->set_eeprom || !ops->get_eeprom_len ||
	    !ops->get_eeprom_len(dev))
	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
	/* Check for wrap and zero */
	if (eeprom.offset + eeprom.len <= eeprom.offset)
	/* Check for exceeding total eeprom len */
	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
	/* One bounce buffer, reused for every chunk. */
	data = kmalloc(PAGE_SIZE, GFP_USER);
	bytes_remaining = eeprom.len;
	while (bytes_remaining > 0) {
		eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
		if (copy_from_user(data, userbuf, eeprom.len)) {
		ret = ops->set_eeprom(dev, &eeprom, data);
		userbuf += eeprom.len;
		eeprom.offset += eeprom.len;
		bytes_remaining -= eeprom.len;
/* ETHTOOL_GCOALESCE handler: report interrupt coalescing parameters. */
static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
						   void __user *useraddr)
	struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
	if (!dev->ethtool_ops->get_coalesce)
	dev->ethtool_ops->get_coalesce(dev, &coalesce);
	if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
/* ETHTOOL_SCOALESCE handler: apply interrupt coalescing parameters. */
static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
						   void __user *useraddr)
	struct ethtool_coalesce coalesce;
	if (!dev->ethtool_ops->set_coalesce)
	if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
	return dev->ethtool_ops->set_coalesce(dev, &coalesce);
/* ETHTOOL_GRINGPARAM handler: report RX/TX ring sizes and their maxima. */
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
	struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
	if (!dev->ethtool_ops->get_ringparam)
	dev->ethtool_ops->get_ringparam(dev, &ringparam);
	if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
/* ETHTOOL_SRINGPARAM handler: apply new ring sizes after checking each
 * requested value against the driver-reported maximum.
 */
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
	struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
	/* get_ringparam is required too: it supplies the maxima below. */
	if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
	if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
	dev->ethtool_ops->get_ringparam(dev, &max);
	/* ensure new ring parameters are within the maximums */
	if (ringparam.rx_pending > max.rx_max_pending ||
	    ringparam.rx_mini_pending > max.rx_mini_max_pending ||
	    ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending ||
	    ringparam.tx_pending > max.tx_max_pending)
	return dev->ethtool_ops->set_ringparam(dev, &ringparam);
/* ETHTOOL_GCHANNELS handler: report channel (queue) counts and maxima. */
static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
						   void __user *useraddr)
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	if (!dev->ethtool_ops->get_channels)
	dev->ethtool_ops->get_channels(dev, &channels);
	if (copy_to_user(useraddr, &channels, sizeof(channels)))
/* ETHTOOL_SCHANNELS handler: change channel counts. Rejects requests that
 * exceed the device maxima, would strand entries of a user-configured RSS
 * indirection table, or would remove queues that still have zero-copy
 * AF_XDP sockets bound to them.
 */
static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
						   void __user *useraddr)
	struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
	u16 from_channel, to_channel;
	u32 max_rx_in_use = 0;
	if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
	if (copy_from_user(&channels, useraddr, sizeof(channels)))
	dev->ethtool_ops->get_channels(dev, &curr);
	/* ensure new counts are within the maximums */
	if (channels.rx_count > curr.max_rx ||
	    channels.tx_count > curr.max_tx ||
	    channels.combined_count > curr.max_combined ||
	    channels.other_count > curr.max_other)
	/* ensure the new Rx count fits within the configured Rx flow
	 * indirection table settings */
	if (netif_is_rxfh_configured(dev) &&
	    !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
	    (channels.combined_count + channels.rx_count) <= max_rx_in_use)
	/* Disabling channels, query zero-copy AF_XDP sockets */
	from_channel = channels.combined_count +
		min(channels.rx_count, channels.tx_count);
	to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
	for (i = from_channel; i < to_channel; i++)
		if (xdp_get_umem_from_qid(dev, i))
	return dev->ethtool_ops->set_channels(dev, &channels);
/* ETHTOOL_GPAUSEPARAM handler: report flow-control (pause) settings. */
static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
	struct ethtool_pauseparam pauseparam = { .cmd = ETHTOOL_GPAUSEPARAM };
	if (!dev->ethtool_ops->get_pauseparam)
	dev->ethtool_ops->get_pauseparam(dev, &pauseparam);
	if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
/* ETHTOOL_SPAUSEPARAM handler: apply flow-control (pause) settings. */
static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
	struct ethtool_pauseparam pauseparam;
	if (!dev->ethtool_ops->set_pauseparam)
	if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
	return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
/* ETHTOOL_TEST handler: run the driver self-test and copy back the test
 * header followed by an array of u64 results (one per test entry).
 */
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
	struct ethtool_test test;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->self_test || !ops->get_sset_count)
	test_len = ops->get_sset_count(dev, ETH_SS_TEST);
	/* A driver advertising self-test with zero entries is a driver bug. */
	WARN_ON(test_len == 0);
	if (copy_from_user(&test, useraddr, sizeof(test)))
	test.len = test_len;
	data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
	ops->self_test(dev, &test, data);
	if (copy_to_user(useraddr, &test, sizeof(test)))
	useraddr += sizeof(test);
	if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
/* ETHTOOL_GSTRINGS handler: return the names of the requested string set
 * (stats, tests, ...) as an array of ETH_GSTRING_LEN-byte entries.
 */
static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
	struct ethtool_gstrings gstrings;
	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
	ret = __ethtool_get_sset_count(dev, gstrings.string_set);
	/* Guard the len * ETH_GSTRING_LEN multiplication against overflow. */
	if (ret > S32_MAX / ETH_GSTRING_LEN)
	data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
	__ethtool_get_strings(dev, gstrings.string_set, data);
	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
	useraddr += sizeof(gstrings);
	copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
/* ETHTOOL_PHYS_ID handler: blink the port identification LED for id.data
 * seconds (0 = until interrupted). If set_phys_id(ETHTOOL_ID_ACTIVE)
 * returns a positive rc, the core drives the blinking itself at 2*rc
 * toggles per second; otherwise the driver handles it and we just sleep.
 */
static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
	struct ethtool_value id;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->set_phys_id)
	if (copy_from_user(&id, useraddr, sizeof(id)))
	rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
	/* Drop the RTNL lock while waiting, but prevent reentry or
	 * removal of the device.
	/* Driver will handle this itself */
	schedule_timeout_interruptible(
		id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
	/* Driver expects to be called at twice the frequency in rc */
	int n = rc * 2, i, interval = HZ / n;
	/* Count down seconds */
	/* Count down iterations per second */
	/* Alternate ON/OFF on each iteration to produce the blink. */
	rc = ops->set_phys_id(dev,
			      (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
	schedule_timeout_interruptible(interval);
	} while (!signal_pending(current) && --i != 0);
	} while (!signal_pending(current) &&
		 (id.data == 0 || --id.data != 0));
	/* Always deactivate the LED, even on signal/error. */
	(void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
/* ETHTOOL_GSTATS handler: copy the driver's u64 statistics array (sized by
 * get_sset_count(ETH_SS_STATS)) out to userspace after the stats header.
 */
static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
	struct ethtool_stats stats;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->get_ethtool_stats || !ops->get_sset_count)
	n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
	/* Guard the n_stats * sizeof(u64) multiplication against overflow. */
	if (n_stats > S32_MAX / sizeof(u64))
	WARN_ON_ONCE(!n_stats);
	if (copy_from_user(&stats, useraddr, sizeof(stats)))
	stats.n_stats = n_stats;
	data = vzalloc(array_size(n_stats, sizeof(u64)));
	ops->get_ethtool_stats(dev, &stats, data);
	if (copy_to_user(useraddr, &stats, sizeof(stats)))
	useraddr += sizeof(stats);
	if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
/* ETHTOOL_GPHYSTATS handler: copy PHY statistics to userspace. Prefers the
 * generic PHY-library stats when a phydev is attached and the MAC driver
 * does not provide its own get_ethtool_phy_stats implementation.
 */
static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct phy_device *phydev = dev->phydev;
	struct ethtool_stats stats;
	if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
	if (dev->phydev && !ops->get_ethtool_phy_stats)
		n_stats = phy_ethtool_get_sset_count(dev->phydev);
		n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
	/* Guard the n_stats * sizeof(u64) multiplication against overflow. */
	if (n_stats > S32_MAX / sizeof(u64))
	WARN_ON_ONCE(!n_stats);
	if (copy_from_user(&stats, useraddr, sizeof(stats)))
	stats.n_stats = n_stats;
	data = vzalloc(array_size(n_stats, sizeof(u64)));
	if (dev->phydev && !ops->get_ethtool_phy_stats) {
		ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
		ops->get_ethtool_phy_stats(dev, &stats, data);
	if (copy_to_user(useraddr, &stats, sizeof(stats)))
	useraddr += sizeof(stats);
	if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
/* ETHTOOL_GPERMADDR handler: report the device's permanent MAC address.
 * The user buffer must be at least dev->addr_len bytes.
 */
static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
	struct ethtool_perm_addr epaddr;
	if (copy_from_user(&epaddr, useraddr, sizeof(epaddr)))
	if (epaddr.size < dev->addr_len)
	epaddr.size = dev->addr_len;
	if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
	useraddr += sizeof(epaddr);
	if (copy_to_user(useraddr, dev->perm_addr, epaddr.size))
/* Generic "get one u32" helper: calls @actor and returns the value wrapped
 * in an ethtool_value with the given @cmd.
 */
static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
			     u32 cmd, u32 (*actor)(struct net_device *))
	struct ethtool_value edata = { .cmd = cmd };
	edata.data = actor(dev);
	if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Generic "set one u32" helper for actors that cannot fail (void return). */
static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr,
				  void (*actor)(struct net_device *, u32))
	struct ethtool_value edata;
	if (copy_from_user(&edata, useraddr, sizeof(edata)))
	actor(dev, edata.data);
/* Generic "set one u32" helper for actors that return an error code. */
static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
			     int (*actor)(struct net_device *, u32))
	struct ethtool_value edata;
	if (copy_from_user(&edata, useraddr, sizeof(edata)))
	return actor(dev, edata.data);
/* ETHTOOL_FLASHDEV handler: flash device firmware from the named file.
 * Falls back to the devlink compat path when the driver has no
 * flash_device op.
 */
static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
						   char __user *useraddr)
	struct ethtool_flash efl;
	if (copy_from_user(&efl, useraddr, sizeof(efl)))
	/* Force NUL-termination of the userspace-supplied filename. */
	efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
	if (!dev->ethtool_ops->flash_device)
		return devlink_compat_flash_update(dev, efl.data);
	return dev->ethtool_ops->flash_device(dev, &efl);
/* ETHTOOL_SET_DUMP handler: configure the driver's dump flag. */
static int ethtool_set_dump(struct net_device *dev,
			    void __user *useraddr)
	struct ethtool_dump dump;
	if (!dev->ethtool_ops->set_dump)
	if (copy_from_user(&dump, useraddr, sizeof(dump)))
	return dev->ethtool_ops->set_dump(dev, &dump);
/* ETHTOOL_GET_DUMP_FLAG handler: report the current dump flag/version/len. */
static int ethtool_get_dump_flag(struct net_device *dev,
				 void __user *useraddr)
	struct ethtool_dump dump;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->get_dump_flag)
	if (copy_from_user(&dump, useraddr, sizeof(dump)))
	ret = ops->get_dump_flag(dev, &dump);
	if (copy_to_user(useraddr, &dump, sizeof(dump)))
/* ETHTOOL_GET_DUMP_DATA handler: fetch the driver dump. Queries the
 * current dump length first, clamps the transfer to what the user asked
 * for, but always allocates the full driver-reported length so drivers
 * need not handle partial buffers.
 */
static int ethtool_get_dump_data(struct net_device *dev,
				 void __user *useraddr)
	struct ethtool_dump dump, tmp;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->get_dump_data || !ops->get_dump_flag)
	if (copy_from_user(&dump, useraddr, sizeof(dump)))
	memset(&tmp, 0, sizeof(tmp));
	tmp.cmd = ETHTOOL_GET_DUMP_FLAG;
	ret = ops->get_dump_flag(dev, &tmp);
	/* Transfer no more than the smaller of driver and user lengths. */
	len = min(tmp.len, dump.len);
	/* Don't ever let the driver think there's more space available
	 * than it requested with .get_dump_flag().
	/* Always allocate enough space to hold the whole thing so that the
	 * driver does not need to check the length and bother with partial
	data = vzalloc(tmp.len);
	ret = ops->get_dump_data(dev, &dump, data);
	/* There are two sane possibilities:
	 * 1. The driver's .get_dump_data() does not touch dump.len.
	 * 2. Or it may set dump.len to how much it really writes, which
	 *    should be tmp.len (or len if it can do a partial dump).
	 * In any case respond to userspace with the actual length of data
	WARN_ON(dump.len != len && dump.len != tmp.len);
	if (copy_to_user(useraddr, &dump, sizeof(dump))) {
	useraddr += offsetof(struct ethtool_dump, data);
	if (copy_to_user(useraddr, data, len))
/* ETHTOOL_GET_TS_INFO handler: report timestamping capabilities. Prefers
 * the attached PHY driver's ts_info, then the MAC driver's get_ts_info,
 * and finally falls back to software-only RX timestamping with no PHC.
 */
static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
	struct ethtool_ts_info info;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct phy_device *phydev = dev->phydev;
	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GET_TS_INFO;
	if (phydev && phydev->drv && phydev->drv->ts_info) {
		err = phydev->drv->ts_info(phydev, &info);
	} else if (ops->get_ts_info) {
		err = ops->get_ts_info(dev, &info);
		/* Fallback: software timestamping only, no PTP hardware clock. */
		info.so_timestamping =
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info.phc_index = -1;
	if (copy_to_user(useraddr, &info, sizeof(info)))
/* Resolve pluggable-module info via the first available provider:
 * SFP bus, then the PHY driver, then the MAC driver's ethtool op.
 */
static int __ethtool_get_module_info(struct net_device *dev,
				     struct ethtool_modinfo *modinfo)
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct phy_device *phydev = dev->phydev;
	return sfp_get_module_info(dev->sfp_bus, modinfo);
	if (phydev && phydev->drv && phydev->drv->module_info)
		return phydev->drv->module_info(phydev, modinfo);
	if (ops->get_module_info)
		return ops->get_module_info(dev, modinfo);
/* ETHTOOL_GMODULEINFO handler: report module type and EEPROM length. */
static int ethtool_get_module_info(struct net_device *dev,
				   void __user *useraddr)
	struct ethtool_modinfo modinfo;
	if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
	ret = __ethtool_get_module_info(dev, &modinfo);
	if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
/* Read module EEPROM via the first available provider: SFP bus, then the
 * PHY driver, then the MAC driver's ethtool op (mirrors
 * __ethtool_get_module_info's provider order).
 */
static int __ethtool_get_module_eeprom(struct net_device *dev,
				       struct ethtool_eeprom *ee, u8 *data)
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct phy_device *phydev = dev->phydev;
	return sfp_get_module_eeprom(dev->sfp_bus, ee, data);
	if (phydev && phydev->drv && phydev->drv->module_eeprom)
		return phydev->drv->module_eeprom(phydev, ee, data);
	if (ops->get_module_eeprom)
		return ops->get_module_eeprom(dev, ee, data);
/* ETHTOOL_GMODULEEEPROM handler: read the module EEPROM via the generic
 * chunked helper, bounded by the module-reported EEPROM length.
 */
static int ethtool_get_module_eeprom(struct net_device *dev,
				     void __user *useraddr)
	struct ethtool_modinfo modinfo;
	ret = __ethtool_get_module_info(dev, &modinfo);
	return ethtool_get_any_eeprom(dev, useraddr,
				      __ethtool_get_module_eeprom,
				      modinfo.eeprom_len);
/* Validate a device tunable request: each known tunable ID must carry the
 * matching type ID and payload length.
 */
static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
	case ETHTOOL_RX_COPYBREAK:
	case ETHTOOL_TX_COPYBREAK:
		if (tuna->len != sizeof(u32) ||
		    tuna->type_id != ETHTOOL_TUNABLE_U32)
	case ETHTOOL_PFC_PREVENTION_TOUT:
		if (tuna->len != sizeof(u16) ||
		    tuna->type_id != ETHTOOL_TUNABLE_U16)
/* ETHTOOL_GTUNABLE handler: validate the request, fetch the tunable value
 * from the driver into a kernel buffer, and copy it back after the header.
 */
static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
	struct ethtool_tunable tuna;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->get_tunable)
	if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
	ret = ethtool_tunable_valid(&tuna);
	data = kmalloc(tuna.len, GFP_USER);
	ret = ops->get_tunable(dev, &tuna, data);
	useraddr += sizeof(tuna);
	if (copy_to_user(useraddr, data, tuna.len))
/* ETHTOOL_STUNABLE handler: validate the request, copy the value payload
 * that follows the header from userspace, and hand it to the driver.
 */
static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr)
	struct ethtool_tunable tuna;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	if (!ops->set_tunable)
	if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
	ret = ethtool_tunable_valid(&tuna);
	useraddr += sizeof(tuna);
	/* tuna.len was validated above, so this bounded copy is safe. */
	data = memdup_user(useraddr, tuna.len);
		return PTR_ERR(data);
	ret = ops->set_tunable(dev, &tuna, data);
/* ETHTOOL_PERQUEUE/ETHTOOL_GCOALESCE: for each queue selected in the
 * request's queue_mask, fetch its coalescing parameters and copy them out
 * back-to-back after the per-queue header.
 */
static noinline_for_stack int
ethtool_get_per_queue_coalesce(struct net_device *dev,
			       void __user *useraddr,
			       struct ethtool_per_queue_op *per_queue_opt)
	DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);
	if (!dev->ethtool_ops->get_per_queue_coalesce)
	useraddr += sizeof(*per_queue_opt);
	bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask,
	for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {
		struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
		ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, &coalesce);
		if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
		useraddr += sizeof(coalesce);
/* ETHTOOL_PERQUEUE/ETHTOOL_SCOALESCE: apply per-queue coalescing settings.
 * Snapshots each queue's current settings first so that on failure the
 * already-modified queues can be rolled back to their previous values.
 */
static noinline_for_stack int
ethtool_set_per_queue_coalesce(struct net_device *dev,
			       void __user *useraddr,
			       struct ethtool_per_queue_op *per_queue_opt)
	struct ethtool_coalesce *backup = NULL, *tmp = NULL;
	DECLARE_BITMAP(queue_mask, MAX_NUM_QUEUE);
	if ((!dev->ethtool_ops->set_per_queue_coalesce) ||
	    (!dev->ethtool_ops->get_per_queue_coalesce))
	useraddr += sizeof(*per_queue_opt);
	bitmap_from_arr32(queue_mask, per_queue_opt->queue_mask, MAX_NUM_QUEUE);
	n_queue = bitmap_weight(queue_mask, MAX_NUM_QUEUE);
	/* One backup slot per selected queue, filled as we go. */
	tmp = backup = kmalloc_array(n_queue, sizeof(*backup), GFP_KERNEL);
	for_each_set_bit(bit, queue_mask, MAX_NUM_QUEUE) {
		struct ethtool_coalesce coalesce;
		/* Save the current settings before overwriting them. */
		ret = dev->ethtool_ops->get_per_queue_coalesce(dev, bit, tmp);
		if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) {
		ret = dev->ethtool_ops->set_per_queue_coalesce(dev, bit, &coalesce);
		useraddr += sizeof(coalesce);
	/* Rollback path: restore every queue updated before the failure. */
	for_each_set_bit(i, queue_mask, bit) {
		dev->ethtool_ops->set_per_queue_coalesce(dev, i, tmp);
/* ETHTOOL_PERQUEUE dispatcher: read the per-queue header, verify its
 * sub_command matches the one pre-read by dev_ethtool, and dispatch to the
 * per-queue coalesce get/set handler.
 */
static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev,
				 void __user *useraddr, u32 sub_cmd)
	struct ethtool_per_queue_op per_queue_opt;
	if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
	if (per_queue_opt.sub_command != sub_cmd)
	switch (per_queue_opt.sub_command) {
	case ETHTOOL_GCOALESCE:
		return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
	case ETHTOOL_SCOALESCE:
		return ethtool_set_per_queue_coalesce(dev, useraddr, &per_queue_opt);
/* Validate a PHY tunable request: known IDs must be u8-typed with a
 * one-byte payload.
 */
static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna)
	case ETHTOOL_PHY_DOWNSHIFT:
	case ETHTOOL_PHY_FAST_LINK_DOWN:
		if (tuna->len != sizeof(u8) ||
		    tuna->type_id != ETHTOOL_TUNABLE_U8)
/* ETHTOOL_PHY_GTUNABLE handler: fetch a PHY tunable via the PHY driver,
 * serialized under phydev->lock, and copy the value back after the header.
 */
static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
	struct ethtool_tunable tuna;
	struct phy_device *phydev = dev->phydev;
	if (!(phydev && phydev->drv && phydev->drv->get_tunable))
	if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
	ret = ethtool_phy_tunable_valid(&tuna);
	data = kmalloc(tuna.len, GFP_USER);
	mutex_lock(&phydev->lock);
	ret = phydev->drv->get_tunable(phydev, &tuna, data);
	mutex_unlock(&phydev->lock);
	useraddr += sizeof(tuna);
	if (copy_to_user(useraddr, data, tuna.len))
/* ETHTOOL_PHY_STUNABLE handler: copy the tunable payload from userspace
 * and apply it via the PHY driver, serialized under phydev->lock.
 */
static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
	struct ethtool_tunable tuna;
	struct phy_device *phydev = dev->phydev;
	if (!(phydev && phydev->drv && phydev->drv->set_tunable))
	if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
	ret = ethtool_phy_tunable_valid(&tuna);
	useraddr += sizeof(tuna);
	/* tuna.len was validated above, so this bounded copy is safe. */
	data = memdup_user(useraddr, tuna.len);
		return PTR_ERR(data);
	mutex_lock(&phydev->lock);
	ret = phydev->drv->set_tunable(phydev, &tuna, data);
	mutex_unlock(&phydev->lock);
/* ETHTOOL_GFECPARAM handler: report forward-error-correction settings. */
static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
	struct ethtool_fecparam fecparam = { .cmd = ETHTOOL_GFECPARAM };
	if (!dev->ethtool_ops->get_fecparam)
	rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
	if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
/* ETHTOOL_SFECPARAM handler: apply forward-error-correction settings. */
static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr)
	struct ethtool_fecparam fecparam;
	if (!dev->ethtool_ops->set_fecparam)
	if (copy_from_user(&fecparam, useraddr, sizeof(fecparam)))
	return dev->ethtool_ops->set_fecparam(dev, &fecparam);
/* The main entry point in this file. Called from net/core/dev_ioctl.c */
/* Looks up the device, reads the command word, enforces CAP_NET_ADMIN for
 * state-changing commands, brackets the operation with the driver's
 * begin()/complete() hooks, dispatches to the per-command handler, and
 * emits a features-change notification if dev->features changed.
 */
int dev_ethtool(struct net *net, struct ifreq *ifr)
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	void __user *useraddr = ifr->ifr_data;
	u32 ethcmd, sub_cmd;
	netdev_features_t old_features;
	if (!dev || !netif_device_present(dev))
	/* NOTE(review): "ðcmd" below looks like "&ethcmd" mis-encoded via
	 * the HTML entity "&eth;" — the source text encoding needs repair.
	 */
	if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
	if (ethcmd == ETHTOOL_PERQUEUE) {
		/* Pre-read the sub-command so the permission check below can
		 * treat ETHTOOL_PERQUEUE according to what it wraps. */
		if (copy_from_user(&sub_cmd, useraddr + sizeof(ethcmd), sizeof(sub_cmd)))
	/* Allow some commands to be done by anyone */
	case ETHTOOL_GDRVINFO:
	case ETHTOOL_GMSGLVL:
	case ETHTOOL_GCOALESCE:
	case ETHTOOL_GRINGPARAM:
	case ETHTOOL_GPAUSEPARAM:
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_GSSET_INFO:
	case ETHTOOL_GSTRINGS:
	case ETHTOOL_GSTATS:
	case ETHTOOL_GPHYSTATS:
	case ETHTOOL_GPERMADDR:
	case ETHTOOL_GFLAGS:
	case ETHTOOL_GPFLAGS:
	case ETHTOOL_GRXRINGS:
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	case ETHTOOL_GRXFHINDIR:
	case ETHTOOL_GFEATURES:
	case ETHTOOL_GCHANNELS:
	case ETHTOOL_GET_TS_INFO:
	case ETHTOOL_GTUNABLE:
	case ETHTOOL_PHY_GTUNABLE:
	case ETHTOOL_GLINKSETTINGS:
	case ETHTOOL_GFECPARAM:
	/* Everything else requires net-admin capability in this netns. */
	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
	if (dev->ethtool_ops->begin) {
		rc = dev->ethtool_ops->begin(dev);
	/* Snapshot features to detect changes made by the handler. */
	old_features = dev->features;
		rc = ethtool_get_settings(dev, useraddr);
		rc = ethtool_set_settings(dev, useraddr);
	case ETHTOOL_GDRVINFO:
		rc = ethtool_get_drvinfo(dev, useraddr);
		rc = ethtool_get_regs(dev, useraddr);
		rc = ethtool_get_wol(dev, useraddr);
		rc = ethtool_set_wol(dev, useraddr);
	case ETHTOOL_GMSGLVL:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_msglevel);
	case ETHTOOL_SMSGLVL:
		rc = ethtool_set_value_void(dev, useraddr,
				       dev->ethtool_ops->set_msglevel);
		rc = ethtool_get_eee(dev, useraddr);
		rc = ethtool_set_eee(dev, useraddr);
	case ETHTOOL_NWAY_RST:
		rc = ethtool_nway_reset(dev);
		rc = ethtool_get_link(dev, useraddr);
	case ETHTOOL_GEEPROM:
		rc = ethtool_get_eeprom(dev, useraddr);
	case ETHTOOL_SEEPROM:
		rc = ethtool_set_eeprom(dev, useraddr);
	case ETHTOOL_GCOALESCE:
		rc = ethtool_get_coalesce(dev, useraddr);
	case ETHTOOL_SCOALESCE:
		rc = ethtool_set_coalesce(dev, useraddr);
	case ETHTOOL_GRINGPARAM:
		rc = ethtool_get_ringparam(dev, useraddr);
	case ETHTOOL_SRINGPARAM:
		rc = ethtool_set_ringparam(dev, useraddr);
	case ETHTOOL_GPAUSEPARAM:
		rc = ethtool_get_pauseparam(dev, useraddr);
	case ETHTOOL_SPAUSEPARAM:
		rc = ethtool_set_pauseparam(dev, useraddr);
		rc = ethtool_self_test(dev, useraddr);
	case ETHTOOL_GSTRINGS:
		rc = ethtool_get_strings(dev, useraddr);
	case ETHTOOL_PHYS_ID:
		rc = ethtool_phys_id(dev, useraddr);
	case ETHTOOL_GSTATS:
		rc = ethtool_get_stats(dev, useraddr);
	case ETHTOOL_GPERMADDR:
		rc = ethtool_get_perm_addr(dev, useraddr);
	case ETHTOOL_GFLAGS:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
					__ethtool_get_flags);
	case ETHTOOL_SFLAGS:
		rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
	case ETHTOOL_GPFLAGS:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_priv_flags);
	case ETHTOOL_SPFLAGS:
		rc = ethtool_set_value(dev, useraddr,
				       dev->ethtool_ops->set_priv_flags);
	case ETHTOOL_GRXRINGS:
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
	case ETHTOOL_FLASHDEV:
		rc = ethtool_flash_device(dev, useraddr);
		rc = ethtool_reset(dev, useraddr);
	case ETHTOOL_GSSET_INFO:
		rc = ethtool_get_sset_info(dev, useraddr);
	case ETHTOOL_GRXFHINDIR:
		rc = ethtool_get_rxfh_indir(dev, useraddr);
	case ETHTOOL_SRXFHINDIR:
		rc = ethtool_set_rxfh_indir(dev, useraddr);
		rc = ethtool_get_rxfh(dev, useraddr);
		rc = ethtool_set_rxfh(dev, useraddr);
	case ETHTOOL_GFEATURES:
		rc = ethtool_get_features(dev, useraddr);
	case ETHTOOL_SFEATURES:
		rc = ethtool_set_features(dev, useraddr);
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_GRXCSUM:
		rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
	case ETHTOOL_STXCSUM:
	case ETHTOOL_SRXCSUM:
		rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
	case ETHTOOL_GCHANNELS:
		rc = ethtool_get_channels(dev, useraddr);
	case ETHTOOL_SCHANNELS:
		rc = ethtool_set_channels(dev, useraddr);
	case ETHTOOL_SET_DUMP:
		rc = ethtool_set_dump(dev, useraddr);
	case ETHTOOL_GET_DUMP_FLAG:
		rc = ethtool_get_dump_flag(dev, useraddr);
	case ETHTOOL_GET_DUMP_DATA:
		rc = ethtool_get_dump_data(dev, useraddr);
	case ETHTOOL_GET_TS_INFO:
		rc = ethtool_get_ts_info(dev, useraddr);
	case ETHTOOL_GMODULEINFO:
		rc = ethtool_get_module_info(dev, useraddr);
	case ETHTOOL_GMODULEEEPROM:
		rc = ethtool_get_module_eeprom(dev, useraddr);
	case ETHTOOL_GTUNABLE:
		rc = ethtool_get_tunable(dev, useraddr);
	case ETHTOOL_STUNABLE:
		rc = ethtool_set_tunable(dev, useraddr);
	case ETHTOOL_GPHYSTATS:
		rc = ethtool_get_phy_stats(dev, useraddr);
	case ETHTOOL_PERQUEUE:
		rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
	case ETHTOOL_GLINKSETTINGS:
		rc = ethtool_get_link_ksettings(dev, useraddr);
	case ETHTOOL_SLINKSETTINGS:
		rc = ethtool_set_link_ksettings(dev, useraddr);
	case ETHTOOL_PHY_GTUNABLE:
		rc = get_phy_tunable(dev, useraddr);
	case ETHTOOL_PHY_STUNABLE:
		rc = set_phy_tunable(dev, useraddr);
	case ETHTOOL_GFECPARAM:
		rc = ethtool_get_fecparam(dev, useraddr);
	case ETHTOOL_SFECPARAM:
		rc = ethtool_set_fecparam(dev, useraddr);
	if (dev->ethtool_ops->complete)
		dev->ethtool_ops->complete(dev);
	/* Notify the stack if the handler changed the feature set. */
	if (old_features != dev->features)
		netdev_features_change(dev);
2836 struct ethtool_rx_flow_key {
2837 struct flow_dissector_key_basic basic;
2839 struct flow_dissector_key_ipv4_addrs ipv4;
2840 struct flow_dissector_key_ipv6_addrs ipv6;
2842 struct flow_dissector_key_ports tp;
2843 struct flow_dissector_key_ip ip;
2844 struct flow_dissector_key_vlan vlan;
2845 struct flow_dissector_key_eth_addrs eth_addrs;
2846 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
2848 struct ethtool_rx_flow_match {
2849 struct flow_dissector dissector;
2850 struct ethtool_rx_flow_key key;
2851 struct ethtool_rx_flow_key mask;
2854 struct ethtool_rx_flow_rule *
2855 ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
2857 const struct ethtool_rx_flow_spec *fs = input->fs;
2858 static struct in6_addr zero_addr = {};
2859 struct ethtool_rx_flow_match *match;
2860 struct ethtool_rx_flow_rule *flow;
2861 struct flow_action_entry *act;
2863 flow = kzalloc(sizeof(struct ethtool_rx_flow_rule) +
2864 sizeof(struct ethtool_rx_flow_match), GFP_KERNEL);
2866 return ERR_PTR(-ENOMEM);
2868 /* ethtool_rx supports only one single action per rule. */
2869 flow->rule = flow_rule_alloc(1);
2872 return ERR_PTR(-ENOMEM);
2875 match = (struct ethtool_rx_flow_match *)flow->priv;
2876 flow->rule->match.dissector = &match->dissector;
2877 flow->rule->match.mask = &match->mask;
2878 flow->rule->match.key = &match->key;
2880 match->mask.basic.n_proto = htons(0xffff);
2882 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
2885 const struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
2887 match->key.basic.n_proto = htons(ETH_P_IP);
2889 v4_spec = &fs->h_u.tcp_ip4_spec;
2890 v4_m_spec = &fs->m_u.tcp_ip4_spec;
2892 if (v4_m_spec->ip4src) {
2893 match->key.ipv4.src = v4_spec->ip4src;
2894 match->mask.ipv4.src = v4_m_spec->ip4src;
2896 if (v4_m_spec->ip4dst) {
2897 match->key.ipv4.dst = v4_spec->ip4dst;
2898 match->mask.ipv4.dst = v4_m_spec->ip4dst;
2900 if (v4_m_spec->ip4src ||
2901 v4_m_spec->ip4dst) {
2902 match->dissector.used_keys |=
2903 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
2904 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] =
2905 offsetof(struct ethtool_rx_flow_key, ipv4);
2907 if (v4_m_spec->psrc) {
2908 match->key.tp.src = v4_spec->psrc;
2909 match->mask.tp.src = v4_m_spec->psrc;
2911 if (v4_m_spec->pdst) {
2912 match->key.tp.dst = v4_spec->pdst;
2913 match->mask.tp.dst = v4_m_spec->pdst;
2915 if (v4_m_spec->psrc ||
2917 match->dissector.used_keys |=
2918 BIT(FLOW_DISSECTOR_KEY_PORTS);
2919 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
2920 offsetof(struct ethtool_rx_flow_key, tp);
2922 if (v4_m_spec->tos) {
2923 match->key.ip.tos = v4_spec->tos;
2924 match->mask.ip.tos = v4_m_spec->tos;
2925 match->dissector.used_keys |=
2926 BIT(FLOW_DISSECTOR_KEY_IP);
2927 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
2928 offsetof(struct ethtool_rx_flow_key, ip);
2934 const struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
2936 match->key.basic.n_proto = htons(ETH_P_IPV6);
2938 v6_spec = &fs->h_u.tcp_ip6_spec;
2939 v6_m_spec = &fs->m_u.tcp_ip6_spec;
2940 if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
2941 memcpy(&match->key.ipv6.src, v6_spec->ip6src,
2942 sizeof(match->key.ipv6.src));
2943 memcpy(&match->mask.ipv6.src, v6_m_spec->ip6src,
2944 sizeof(match->mask.ipv6.src));
2946 if (memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
2947 memcpy(&match->key.ipv6.dst, v6_spec->ip6dst,
2948 sizeof(match->key.ipv6.dst));
2949 memcpy(&match->mask.ipv6.dst, v6_m_spec->ip6dst,
2950 sizeof(match->mask.ipv6.dst));
2952 if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
2953 memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
2954 match->dissector.used_keys |=
2955 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
2956 match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
2957 offsetof(struct ethtool_rx_flow_key, ipv6);
2959 if (v6_m_spec->psrc) {
2960 match->key.tp.src = v6_spec->psrc;
2961 match->mask.tp.src = v6_m_spec->psrc;
2963 if (v6_m_spec->pdst) {
2964 match->key.tp.dst = v6_spec->pdst;
2965 match->mask.tp.dst = v6_m_spec->pdst;
2967 if (v6_m_spec->psrc ||
2969 match->dissector.used_keys |=
2970 BIT(FLOW_DISSECTOR_KEY_PORTS);
2971 match->dissector.offset[FLOW_DISSECTOR_KEY_PORTS] =
2972 offsetof(struct ethtool_rx_flow_key, tp);
2974 if (v6_m_spec->tclass) {
2975 match->key.ip.tos = v6_spec->tclass;
2976 match->mask.ip.tos = v6_m_spec->tclass;
2977 match->dissector.used_keys |=
2978 BIT(FLOW_DISSECTOR_KEY_IP);
2979 match->dissector.offset[FLOW_DISSECTOR_KEY_IP] =
2980 offsetof(struct ethtool_rx_flow_key, ip);
2985 ethtool_rx_flow_rule_destroy(flow);
2986 return ERR_PTR(-EINVAL);
2989 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
2992 match->key.basic.ip_proto = IPPROTO_TCP;
2996 match->key.basic.ip_proto = IPPROTO_UDP;
2999 match->mask.basic.ip_proto = 0xff;
3001 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
3002 match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] =
3003 offsetof(struct ethtool_rx_flow_key, basic);
3005 if (fs->flow_type & FLOW_EXT) {
3006 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
3007 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
3009 if (ext_m_spec->vlan_etype) {
3010 match->key.vlan.vlan_tpid = ext_h_spec->vlan_etype;
3011 match->mask.vlan.vlan_tpid = ext_m_spec->vlan_etype;
3014 if (ext_m_spec->vlan_tci) {
3015 match->key.vlan.vlan_id =
3016 ntohs(ext_h_spec->vlan_tci) & 0x0fff;
3017 match->mask.vlan.vlan_id =
3018 ntohs(ext_m_spec->vlan_tci) & 0x0fff;
3020 match->key.vlan.vlan_priority =
3021 (ntohs(ext_h_spec->vlan_tci) & 0xe000) >> 13;
3022 match->mask.vlan.vlan_priority =
3023 (ntohs(ext_m_spec->vlan_tci) & 0xe000) >> 13;
3026 if (ext_m_spec->vlan_etype ||
3027 ext_m_spec->vlan_tci) {
3028 match->dissector.used_keys |=
3029 BIT(FLOW_DISSECTOR_KEY_VLAN);
3030 match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
3031 offsetof(struct ethtool_rx_flow_key, vlan);
3034 if (fs->flow_type & FLOW_MAC_EXT) {
3035 const struct ethtool_flow_ext *ext_h_spec = &fs->h_ext;
3036 const struct ethtool_flow_ext *ext_m_spec = &fs->m_ext;
3038 memcpy(match->key.eth_addrs.dst, ext_h_spec->h_dest,
3040 memcpy(match->mask.eth_addrs.dst, ext_m_spec->h_dest,
3043 match->dissector.used_keys |=
3044 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
3045 match->dissector.offset[FLOW_DISSECTOR_KEY_ETH_ADDRS] =
3046 offsetof(struct ethtool_rx_flow_key, eth_addrs);
3049 act = &flow->rule->action.entries[0];
3050 switch (fs->ring_cookie) {
3051 case RX_CLS_FLOW_DISC:
3052 act->id = FLOW_ACTION_DROP;
3054 case RX_CLS_FLOW_WAKE:
3055 act->id = FLOW_ACTION_WAKE;
3058 act->id = FLOW_ACTION_QUEUE;
3059 if (fs->flow_type & FLOW_RSS)
3060 act->queue.ctx = input->rss_ctx;
3062 act->queue.vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
3063 act->queue.index = ethtool_get_flow_spec_ring(fs->ring_cookie);
3069 EXPORT_SYMBOL(ethtool_rx_flow_rule_create);
3071 void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *flow)
3076 EXPORT_SYMBOL(ethtool_rx_flow_rule_destroy);