// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

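/* A device is "alive" as long as unregistration has not started: reg_state
 * is then NETREG_UNINITIALIZED or NETREG_REGISTERED, which both sort before
 * the NETREG_UNREGISTERING/NETREG_UNREGISTERED states in the enum.
 */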
/* Caller holds RTNL, netdev->lock or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
	return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}

/* There is a possible ABBA deadlock between rtnl_lock and kernfs_node->active,
 * when unregistering a net device and accessing associated sysfs files. The
 * potential deadlock is as follows:
 *
 *         CPU 0                                          CPU 1
 *
 *    rtnl_lock                                   vfs_read
 *    unregister_netdevice_many                   kernfs_seq_start
 *    device_del / kobject_put                      kernfs_get_active (kn->active++)
 *    kernfs_drain                                sysfs_kf_seq_show
 *      wait_event(                                 rtnl_lock
 *         kn->active == KN_DEACTIVATED_BIAS)         -> waits on CPU 0 to release
 *      -> waits on CPU 1 to decrease kn->active        the rtnl lock.
 *
 * The historical fix was to use rtnl_trylock with restart_syscall to bail out
 * of sysfs operations when the lock couldn't be taken. This fixed the above
 * issue as it allowed CPU 1 to bail out of the ABBA situation.
 *
 * But it came with performance issues, as syscalls were restarted in loops
 * whenever there was contention on the rtnl lock, with huge slowdowns in
 * specific scenarios (e.g. lots of virtual interfaces created and userspace
 * daemons querying their attributes).
 *
 * The idea below is to bail out of the active kernfs_node protection
 * (kn->active) while trying to take the rtnl lock.
 *
 * This replaces rtnl_lock() and still has to be used with rtnl_unlock(). The
 * net device is guaranteed to be alive if this returns successfully.
 */
static int sysfs_rtnl_lock(struct kobject *kobj, struct attribute *attr,
			   struct net_device *ndev)
{
	struct kernfs_node *kn;
	int ret = 0;

	/* First, we hold a reference to the net device as the unregistration
	 * path might run in parallel. This will ensure the net device and the
	 * associated sysfs objects won't be freed while we try to take the rtnl
	 * lock.
	 */
	dev_hold(ndev);
	/* sysfs_break_active_protection was introduced to allow self-removal of
	 * devices and their associated sysfs files by bailing out of the
	 * sysfs/kernfs protection. We do this here to allow the unregistration
	 * path to complete in parallel. The following takes a reference on the
	 * kobject and the kernfs_node being accessed.
	 *
	 * This works because we hold a reference onto the net device and the
	 * unregistration path will wait for us eventually in netdev_run_todo
	 * (outside an rtnl lock section).
	 */
	kn = sysfs_break_active_protection(kobj, attr);
	/* We can now try to take the rtnl lock. This can't deadlock us as the
	 * unregistration path is able to drain sysfs files (kernfs_node) thanks
	 * to the above dance.
	 */
	if (rtnl_lock_interruptible()) {
		ret = -ERESTARTSYS;
		goto unbreak;
	}
	/* Check dismantle on the device hasn't started, otherwise deny the
	 * operation.
	 */
	if (!dev_isalive(ndev)) {
		rtnl_unlock();
		ret = -ENODEV;
		goto unbreak;
	}
	/* We are now sure the device dismantle hasn't started and that it
	 * can't start before we exit the locking section, as we hold the rtnl
	 * lock. There's no need to keep unbreaking the sysfs protection nor to
	 * hold a net device reference from that point; that was only needed to
	 * take the rtnl lock.
	 */
unbreak:
	sysfs_unbreak_active_protection(kn);
	dev_put(ndev);

	return ret;
}
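
/* A minimal usage sketch, mirroring the pattern the handlers below follow:
 *
 *	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
 *	if (ret)
 *		return ret;
 *	...access the device, now guaranteed alive and rtnl-locked...
 *	rtnl_unlock();
 */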

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	rcu_read_unlock();

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sysfs_emit(buf, format_string, READ_ONCE(dev->field));	\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
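
/* For example, NETDEVICE_SHOW_RO(ifindex, fmt_dec) expands to
 * format_ifindex(), ifindex_show() and dev_attr_ifindex, which sysfs
 * exposes as /sys/class/net/<iface>/ifindex.
 */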

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		goto err;

	ret = (*set)(netdev, new);
	if (ret == 0)
		ret = len;

	rtnl_unlock();
err:
	return ret;
}

/* Same as netdev_store() but takes netdev_lock() instead of rtnl_lock() */
static ssize_t
netdev_lock_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t len,
		  int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;

	netdev_lock(netdev);

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	netdev_unlock(netdev);

	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	down_read(&dev_addr_sem);

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	rcu_read_unlock();

	up_read(&dev_addr_sem);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	int ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	rcu_read_unlock();
	return ret;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the locking section in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		/* Synchronize carrier state with link watch,
		 * see also rtnl_getlink().
		 */
		linkwatch_sync_dev(netdev);

		ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}

	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sysfs_emit(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sysfs_emit(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

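/* Index order must match the IF_OPER_* values from uapi/linux/if.h
 * (the RFC 2863 operational states); operstate_show() below indexes
 * this array with them.
 */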
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	operstate = READ_ONCE(netdev->operstate);
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sysfs_emit(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec,
			  atomic_read(&netdev->carrier_up_count) +
			  atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	netdev_set_gro_flush_timeout(dev, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	if (val > S32_MAX)
		return -ERANGE;

	netdev_set_defer_hard_irqs(dev, (u32)val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len,
				 change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_set_alias(netdev, buf, count);
	if (ret < 0)
		goto err;
	ret = len;
	netdev_state_change(netdev);
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sysfs_emit(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netdev_phys_item_id ppid;
	ssize_t ret;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the locking section below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_phys_port_id(netdev, &ppid);
	if (!ret)
		ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	char name[IFNAMSIZ];
	ssize_t ret;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->devlink_port)
		return -EOPNOTSUPP;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_phys_port_name(netdev, name, sizeof(name));
	if (!ret)
		ret = sysfs_emit(buf, "%s\n", name);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netdev_phys_item_id ppid = { };
	ssize_t ret;

	/* The checks are also done in dev_get_port_parent_id; this helps
	 * returning early without hitting the locking section below. This works
	 * because recurse is false when calling dev_get_port_parent_id.
	 */
	if (!netdev->netdev_ops->ndo_get_port_parent_id &&
	    !netdev->devlink_port)
		return -EOPNOTSUPP;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_port_parent_id(netdev, &ppid, false);
	if (!ret)
		ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();

	if (dev_isalive(netdev))
		ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->threaded));

	rcu_read_unlock();

	return ret;
}

static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = dev_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_lock_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
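/* ATTRIBUTE_GROUPS(net_class) generates net_class_group and
 * net_class_groups[]; the latter hangs the files above directly off
 * /sys/class/net/<iface>/.
 */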
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	rcu_read_lock();
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	rcu_read_unlock();
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
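
/* Each NETSTAT_ENTRY(foo) below generates foo_show() and dev_attr_foo,
 * reading the foo counter out of rtnl_link_stats64 by structure offset
 * and exposing it as statistics/foo.
 */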

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

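/* Note: wireless_attrs is intentionally empty. The group is still created
 * when wireless_group_needed() says so, so that the bare "wireless"
 * directory can act as a marker of a wireless device for user space.
 */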
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};

static bool wireless_group_needed(struct net_device *ndev)
{
#if IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		return true;
#endif
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	if (ndev->wireless_handlers)
		return true;
#endif
	return false;
}

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = sysfs_emit(buf, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue,
					cpumask_var_t mask)
{
	static DEFINE_MUTEX(rps_map_mutex);
	struct rps_map *old_map, *map;
	int cpu, i;

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);
	return 0;
}

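/* Restrict an RPS CPU mask to the housekeeping CPUs (CPU isolation):
 * a mask that was explicitly non-empty but reduces to empty is rejected,
 * while an all-empty mask passes through (it simply disables RPS).
 */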
int rps_cpumask_housekeeping(struct cpumask *mask)
{
	if (!cpumask_empty(mask)) {
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ));
		if (cpumask_empty(mask))
			return -EINVAL;
	}
	return 0;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err)
		goto out;

	err = rps_cpumask_housekeeping(mask);
	if (err)
		goto out;

	err = netdev_rx_queue_set_rps_mask(queue, mask);

out:
	free_cpumask_var(mask);
	return err ? : len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = 1UL << flow_table->log;
	rcu_read_unlock();

	return sysfs_emit(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->log = ilog2(mask) + 1;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	netdev_put(queue->dev, &queue->dev_tracker);
}

static const void *rx_queue_namespace(const struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->namespace)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(const struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static const struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_default_mask(struct net_device *dev,
				 struct netdev_rx_queue *queue)
{
#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
	struct cpumask *rps_default_mask = READ_ONCE(dev_net(dev)->core.rps_default_mask);

	if (rps_default_mask && !cpumask_empty(rps_default_mask))
		return netdev_rx_queue_set_rps_mask(queue, rps_default_mask);
#endif
	return 0;
}

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Rx queues are cleared in rx_queue_release to allow later
	 * re-registration. This is triggered when their kobj refcount is
	 * dropped.
	 *
	 * If a queue is removed while both a read (or write) operation and
	 * the re-addition of the same queue are pending (waiting on rtnl_lock)
	 * it might happen that the re-addition will execute before the read,
	 * making the initial removal never happen (the queue's kobj refcount
	 * won't drop enough because of the pending read). In such a rare case,
	 * return to allow the removal operation to complete.
	 */
	if (unlikely(kobj->state_initialized)) {
		netdev_warn_once(dev, "Cannot re-add rx queues before their removal completed");
		return -EAGAIN;
	}

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	queue->groups = rx_queue_default_groups;
	error = sysfs_create_groups(kobj, queue->groups);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err_default_groups;
	}

	error = rx_queue_default_mask(dev, queue);
	if (error)
		goto err_default_groups;

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err_default_groups:
	sysfs_remove_groups(kobj, queue->groups);
err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_rx_queue *queue = &dev->_rx[i];
		struct kobject *kobj = &queue->kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		sysfs_remove_groups(kobj, queue->groups);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
			struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
			 struct netdev_queue *queue, const char *buf,
			 size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(kobj, attr, queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(kobj, attr, queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct kobject *kobj, struct attribute *attr,
			       struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);

	return sysfs_emit(buf, fmt_ulong, trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc, index, ret;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	ret = sysfs_rtnl_lock(kobj, attr, queue->dev);
	if (ret)
		return ret;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return num_tc < 0 ? sysfs_emit(buf, "%d%d\n", tc, num_tc) :
			    sysfs_emit(buf, "%d\n", tc);
}

1454 | #ifdef CONFIG_XPS | |
b0b6fcfa AT |
1455 | static ssize_t tx_maxrate_show(struct kobject *kobj, struct attribute *attr, |
1456 | struct netdev_queue *queue, char *buf) | |
822b3b2e | 1457 | { |
73c2e90a | 1458 | return sysfs_emit(buf, "%lu\n", queue->tx_maxrate); |
822b3b2e JF |
1459 | } |
1460 | ||
b0b6fcfa AT |
1461 | static ssize_t tx_maxrate_store(struct kobject *kobj, struct attribute *attr, |
1462 | struct netdev_queue *queue, const char *buf, | |
1463 | size_t len) | |
822b3b2e | 1464 | { |
822b3b2e | 1465 | int err, index = get_netdev_queue_index(queue); |
b0b6fcfa | 1466 | struct net_device *dev = queue->dev; |
822b3b2e JF |
1467 | u32 rate = 0; |
1468 | ||
3033fced TH |
1469 | if (!capable(CAP_NET_ADMIN)) |
1470 | return -EPERM; | |
1471 | ||
146e5e73 | 1472 | /* The check is repeated under the lock below; doing it here first |
b0b6fcfa | 1473 | * lets us return early without entering the locking section. |
146e5e73 AT |
1474 | */ |
1475 | if (!dev->netdev_ops->ndo_set_tx_maxrate) | |
1476 | return -EOPNOTSUPP; | |
1477 | ||
822b3b2e JF |
1478 | err = kstrtou32(buf, 10, &rate); |
1479 | if (err < 0) | |
1480 | return err; | |
1481 | ||
b0b6fcfa AT |
1482 | err = sysfs_rtnl_lock(kobj, attr, dev); |
1483 | if (err) | |
1484 | return err; | |
822b3b2e JF |
1485 | |
1486 | err = -EOPNOTSUPP; | |
ad7c7b21 | 1487 | netdev_lock_ops(dev); |
822b3b2e JF |
1488 | if (dev->netdev_ops->ndo_set_tx_maxrate) |
1489 | err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); | |
ad7c7b21 | 1490 | netdev_unlock_ops(dev); |
822b3b2e | 1491 | |
822b3b2e JF |
1492 | if (!err) { |
1493 | queue->tx_maxrate = rate; | |
b0b6fcfa | 1494 | rtnl_unlock(); |
822b3b2e JF |
1495 | return len; |
1496 | } | |
b0b6fcfa AT |
1497 | |
1498 | rtnl_unlock(); | |
822b3b2e JF |
1499 | return err; |
1500 | } | |
1501 | ||
2b9c7581 | 1502 | static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init |
1503 | = __ATTR_RW(tx_maxrate); | |
822b3b2e JF |
1504 | #endif |
1505 | ||
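/* Illustrative userspace sketch (not part of net-sysfs.c): setting the
 * tx_maxrate attribute handled above. Requires CAP_NET_ADMIN and a driver
 * implementing ndo_set_tx_maxrate; otherwise the write fails with EPERM
 * or EOPNOTSUPP. "eth0" and queue index 0 are assumptions.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/tx_maxrate", "w");

	if (!f)
		return 1;
	/* rate in Mb/s, parsed by kstrtou32() in tx_maxrate_store() */
	fprintf(f, "1000\n");
	return fclose(f) ? 1 : 0;
}
#endif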
2b9c7581 | 1506 | static struct netdev_queue_attribute queue_trans_timeout __ro_after_init |
1507 | = __ATTR_RO(tx_timeout); | |
ccf5ff69 | 1508 | |
2b9c7581 | 1509 | static struct netdev_queue_attribute queue_traffic_class __ro_after_init |
1510 | = __ATTR_RO(traffic_class); | |
8d059b0f | 1511 | |
114cf580 TH |
1512 | #ifdef CONFIG_BQL |
1513 | /* | |
1514 | * Byte queue limits sysfs structures and functions. | |
1515 | */ | |
1516 | static ssize_t bql_show(char *buf, unsigned int value) | |
1517 | { | |
73c2e90a | 1518 | return sysfs_emit(buf, "%u\n", value); |
114cf580 TH |
1519 | } |
1520 | ||
1521 | static ssize_t bql_set(const char *buf, const size_t count, | |
1522 | unsigned int *pvalue) | |
1523 | { | |
1524 | unsigned int value; | |
1525 | int err; | |
1526 | ||
6648c65e | 1527 | if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { |
114cf580 | 1528 | value = DQL_MAX_LIMIT; |
6648c65e | 1529 | } else { |
114cf580 TH |
1530 | err = kstrtouint(buf, 10, &value); |
1531 | if (err < 0) | |
1532 | return err; | |
1533 | if (value > DQL_MAX_LIMIT) | |
1534 | return -EINVAL; | |
1535 | } | |
1536 | ||
1537 | *pvalue = value; | |
1538 | ||
1539 | return count; | |
1540 | } | |
1541 | ||
b0b6fcfa AT |
1542 | static ssize_t bql_show_hold_time(struct kobject *kobj, struct attribute *attr, |
1543 | struct netdev_queue *queue, char *buf) | |
114cf580 TH |
1544 | { |
1545 | struct dql *dql = &queue->dql; | |
1546 | ||
73c2e90a | 1547 | return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); |
114cf580 TH |
1548 | } |
1549 | ||
b0b6fcfa AT |
1550 | static ssize_t bql_set_hold_time(struct kobject *kobj, struct attribute *attr, |
1551 | struct netdev_queue *queue, const char *buf, | |
1552 | size_t len) | |
114cf580 TH |
1553 | { |
1554 | struct dql *dql = &queue->dql; | |
95c96174 | 1555 | unsigned int value; |
114cf580 TH |
1556 | int err; |
1557 | ||
1558 | err = kstrtouint(buf, 10, &value); | |
1559 | if (err < 0) | |
1560 | return err; | |
1561 | ||
1562 | dql->slack_hold_time = msecs_to_jiffies(value); | |
1563 | ||
1564 | return len; | |
1565 | } | |
1566 | ||
170c658a | 1567 | static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init |
d6444062 | 1568 | = __ATTR(hold_time, 0644, |
170c658a | 1569 | bql_show_hold_time, bql_set_hold_time); |
114cf580 | 1570 | |
b0b6fcfa AT |
1571 | static ssize_t bql_show_stall_thrs(struct kobject *kobj, struct attribute *attr, |
1572 | struct netdev_queue *queue, char *buf) | |
6025b913 JK |
1573 | { |
1574 | struct dql *dql = &queue->dql; | |
1575 | ||
db77cdc6 | 1576 | return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs)); |
6025b913 JK |
1577 | } |
1578 | ||
b0b6fcfa AT |
1579 | static ssize_t bql_set_stall_thrs(struct kobject *kobj, struct attribute *attr, |
1580 | struct netdev_queue *queue, const char *buf, | |
1581 | size_t len) | |
6025b913 JK |
1582 | { |
1583 | struct dql *dql = &queue->dql; | |
1584 | unsigned int value; | |
1585 | int err; | |
1586 | ||
1587 | err = kstrtouint(buf, 10, &value); | |
1588 | if (err < 0) | |
1589 | return err; | |
1590 | ||
1591 | value = msecs_to_jiffies(value); | |
1592 | if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG)) | |
1593 | return -ERANGE; | |
1594 | ||
1595 | if (!dql->stall_thrs && value) | |
1596 | dql->last_reap = jiffies; | |
1597 | /* Force last_reap to be live */ | |
1598 | smp_wmb(); | |
1599 | dql->stall_thrs = value; | |
1600 | ||
1601 | return len; | |
1602 | } | |
1603 | ||
1604 | static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init = | |
1605 | __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs); | |
1606 | ||
b0b6fcfa AT |
1607 | static ssize_t bql_show_stall_max(struct kobject *kobj, struct attribute *attr, |
1608 | struct netdev_queue *queue, char *buf) | |
6025b913 | 1609 | { |
db77cdc6 | 1610 | return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max)); |
6025b913 JK |
1611 | } |
1612 | ||
b0b6fcfa AT |
1613 | static ssize_t bql_set_stall_max(struct kobject *kobj, struct attribute *attr, |
1614 | struct netdev_queue *queue, const char *buf, | |
1615 | size_t len) | |
6025b913 JK |
1616 | { |
1617 | WRITE_ONCE(queue->dql.stall_max, 0); | |
1618 | return len; | |
1619 | } | |
1620 | ||
1621 | static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init = | |
1622 | __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max); | |
1623 | ||
b0b6fcfa AT |
1624 | static ssize_t bql_show_stall_cnt(struct kobject *kobj, struct attribute *attr, |
1625 | struct netdev_queue *queue, char *buf) | |
6025b913 JK |
1626 | { |
1627 | struct dql *dql = &queue->dql; | |
1628 | ||
db77cdc6 | 1629 | return sysfs_emit(buf, "%lu\n", dql->stall_cnt); |
6025b913 JK |
1630 | } |
1631 | ||
1632 | static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init = | |
1633 | __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL); | |
1634 | ||
b0b6fcfa AT |
1635 | static ssize_t bql_show_inflight(struct kobject *kobj, struct attribute *attr, |
1636 | struct netdev_queue *queue, char *buf) | |
114cf580 TH |
1637 | { |
1638 | struct dql *dql = &queue->dql; | |
1639 | ||
73c2e90a | 1640 | return sysfs_emit(buf, "%u\n", dql->num_queued - dql->num_completed); |
114cf580 TH |
1641 | } |
1642 | ||
170c658a | 1643 | static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = |
d6444062 | 1644 | __ATTR(inflight, 0444, bql_show_inflight, NULL); |
114cf580 TH |
1645 | |
1646 | #define BQL_ATTR(NAME, FIELD) \ | |
b0b6fcfa AT |
1647 | static ssize_t bql_show_ ## NAME(struct kobject *kobj, \ |
1648 | struct attribute *attr, \ | |
1649 | struct netdev_queue *queue, char *buf) \ | |
114cf580 TH |
1650 | { \ |
1651 | return bql_show(buf, queue->dql.FIELD); \ | |
1652 | } \ | |
1653 | \ | |
b0b6fcfa AT |
1654 | static ssize_t bql_set_ ## NAME(struct kobject *kobj, \ |
1655 | struct attribute *attr, \ | |
1656 | struct netdev_queue *queue, \ | |
114cf580 TH |
1657 | const char *buf, size_t len) \ |
1658 | { \ | |
1659 | return bql_set(buf, len, &queue->dql.FIELD); \ | |
1660 | } \ | |
1661 | \ | |
170c658a | 1662 | static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \ |
d6444062 | 1663 | = __ATTR(NAME, 0644, \ |
170c658a | 1664 | bql_show_ ## NAME, bql_set_ ## NAME) |
114cf580 | 1665 | |
170c658a | 1666 | BQL_ATTR(limit, limit); |
1667 | BQL_ATTR(limit_max, max_limit); | |
1668 | BQL_ATTR(limit_min, min_limit); | |
114cf580 | 1669 | |
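/* For reference, BQL_ATTR(limit, limit) above expands to roughly the
 * following (shown for illustration only, hence compiled out):
 */
#if 0
static ssize_t bql_show_limit(struct kobject *kobj, struct attribute *attr,
			      struct netdev_queue *queue, char *buf)
{
	return bql_show(buf, queue->dql.limit);
}

static ssize_t bql_set_limit(struct kobject *kobj, struct attribute *attr,
			     struct netdev_queue *queue,
			     const char *buf, size_t len)
{
	return bql_set(buf, len, &queue->dql.limit);
}

static struct netdev_queue_attribute bql_limit_attribute __ro_after_init
	= __ATTR(limit, 0644, bql_show_limit, bql_set_limit);
#endif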
170c658a | 1670 | static struct attribute *dql_attrs[] __ro_after_init = { |
114cf580 TH |
1671 | &bql_limit_attribute.attr, |
1672 | &bql_limit_max_attribute.attr, | |
1673 | &bql_limit_min_attribute.attr, | |
1674 | &bql_hold_time_attribute.attr, | |
1675 | &bql_inflight_attribute.attr, | |
6025b913 JK |
1676 | &bql_stall_thrs_attribute.attr, |
1677 | &bql_stall_cnt_attribute.attr, | |
1678 | &bql_stall_max_attribute.attr, | |
114cf580 TH |
1679 | NULL |
1680 | }; | |
1681 | ||
38ef00cc | 1682 | static const struct attribute_group dql_group = { |
114cf580 TH |
1683 | .name = "byte_queue_limits", |
1684 | .attrs = dql_attrs, | |
1685 | }; | |
74293ea1 BL |
1686 | #else |
1687 | /* Fake declaration; all the code using it should be dead */ |
77461c10 | 1688 | static const struct attribute_group dql_group = {}; |
114cf580 TH |
1689 | #endif /* CONFIG_BQL */ |
1690 | ||
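/* Illustrative userspace sketch (not part of net-sysfs.c): capping a
 * queue's BQL limit via the byte_queue_limits group registered above.
 * Values are bytes; bql_set() also accepts the literal string "max"
 * (mapped to DQL_MAX_LIMIT). "eth0" and queue index 0 are assumptions.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/net/eth0/queues/tx-0/byte_queue_limits/limit_max";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	fprintf(f, "262144\n");	/* 256 KiB ceiling for the queue limit */
	return fclose(f) ? 1 : 0;
}
#endif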
ccf5ff69 | 1691 | #ifdef CONFIG_XPS |
2db6cdae AT |
1692 | static ssize_t xps_queue_show(struct net_device *dev, unsigned int index, |
1693 | int tc, char *buf, enum xps_map_type type) | |
1d24eb48 | 1694 | { |
1d24eb48 | 1695 | struct xps_dev_maps *dev_maps; |
d9a063d2 | 1696 | unsigned long *mask; |
2db6cdae AT |
1697 | unsigned int nr_ids; |
1698 | int j, len; | |
d7be87a6 | 1699 | |
5478fcd0 | 1700 | rcu_read_lock(); |
2db6cdae AT |
1701 | dev_maps = rcu_dereference(dev->xps_maps[type]); |
1702 | ||
1703 | /* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0 | |
1704 | * when dev_maps hasn't been allocated yet, to be backward compatible. | |
1705 | */ | |
1706 | nr_ids = dev_maps ? dev_maps->nr_ids : | |
1707 | (type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues); | |
5478fcd0 | 1708 | |
7f08ec6e | 1709 | mask = bitmap_zalloc(nr_ids, GFP_NOWAIT); |
ea4fe7e8 | 1710 | if (!mask) { |
2db6cdae AT |
1711 | rcu_read_unlock(); |
1712 | return -ENOMEM; | |
fb250385 | 1713 | } |
664088f8 | 1714 | |
255c04a8 | 1715 | if (!dev_maps || tc >= dev_maps->num_tc) |
73f5e52b AT |
1716 | goto out_no_maps; |
1717 | ||
6f36158e | 1718 | for (j = 0; j < nr_ids; j++) { |
255c04a8 | 1719 | int i, tci = j * dev_maps->num_tc + tc; |
73f5e52b AT |
1720 | struct xps_map *map; |
1721 | ||
1722 | map = rcu_dereference(dev_maps->attr_map[tci]); | |
1723 | if (!map) | |
1724 | continue; | |
1725 | ||
1726 | for (i = map->len; i--;) { | |
1727 | if (map->queues[i] == index) { | |
08a7abf4 | 1728 | __set_bit(j, mask); |
73f5e52b | 1729 | break; |
1d24eb48 TH |
1730 | } |
1731 | } | |
1732 | } | |
73f5e52b | 1733 | out_no_maps: |
1d24eb48 | 1734 | rcu_read_unlock(); |
fb250385 | 1735 | |
5478fcd0 | 1736 | len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids); |
ea4fe7e8 | 1737 | bitmap_free(mask); |
2db6cdae | 1738 | |
f0906827 | 1739 | return len < PAGE_SIZE ? len : -EINVAL; |
2db6cdae AT |
1740 | } |
1741 | ||
b0b6fcfa AT |
1742 | static ssize_t xps_cpus_show(struct kobject *kobj, struct attribute *attr, |
1743 | struct netdev_queue *queue, char *buf) | |
2db6cdae AT |
1744 | { |
1745 | struct net_device *dev = queue->dev; | |
1746 | unsigned int index; | |
b0b6fcfa | 1747 | int len, tc, ret; |
2db6cdae AT |
1748 | |
1749 | if (!netif_is_multiqueue(dev)) | |
1750 | return -ENOENT; | |
1751 | ||
1752 | index = get_netdev_queue_index(queue); | |
1753 | ||
b0b6fcfa AT |
1754 | ret = sysfs_rtnl_lock(kobj, attr, queue->dev); |
1755 | if (ret) | |
1756 | return ret; | |
2db6cdae AT |
1757 | |
1758 | /* If queue belongs to subordinate dev use its map */ | |
1759 | dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; | |
1760 | ||
1761 | tc = netdev_txq_to_tc(dev, index); | |
1762 | if (tc < 0) { | |
1763 | rtnl_unlock(); | |
1764 | return -EINVAL; | |
1765 | } | |
1766 | ||
b0b6fcfa AT |
1767 | /* Increase the net device refcnt to make sure it won't be freed while |
1768 | * xps_queue_show is running. | |
1769 | */ | |
1770 | dev_hold(dev); | |
2db6cdae AT |
1771 | rtnl_unlock(); |
1772 | ||
1773 | len = xps_queue_show(dev, index, tc, buf, XPS_CPUS); | |
fb250385 | 1774 | |
b0b6fcfa | 1775 | dev_put(dev); |
2db6cdae | 1776 | return len; |
1d24eb48 TH |
1777 | } |
1778 | ||
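/* Illustrative userspace sketch (not part of net-sysfs.c): reading the
 * XPS CPU mask that xps_cpus_show() renders via
 * bitmap_print_to_pagebuf(), i.e. a hex CPU bitmap (comma-separated in
 * 32-bit groups on larger systems); CPUs 0-3 appear as "0f" on an 8-CPU
 * machine. "eth0" and queue index 0 are assumptions.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char mask[256];
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_cpus", "r");

	if (!f)
		return 1;
	if (fgets(mask, sizeof(mask), f))
		printf("xps_cpus: %s", mask);
	fclose(f);
	return 0;
}
#endif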
b0b6fcfa AT |
1779 | static ssize_t xps_cpus_store(struct kobject *kobj, struct attribute *attr, |
1780 | struct netdev_queue *queue, const char *buf, | |
1781 | size_t len) | |
1d24eb48 TH |
1782 | { |
1783 | struct net_device *dev = queue->dev; | |
d9a063d2 | 1784 | unsigned int index; |
537c00de AD |
1785 | cpumask_var_t mask; |
1786 | int err; | |
1d24eb48 | 1787 | |
d7be9775 AD |
1788 | if (!netif_is_multiqueue(dev)) |
1789 | return -ENOENT; | |
1790 | ||
1d24eb48 TH |
1791 | if (!capable(CAP_NET_ADMIN)) |
1792 | return -EPERM; | |
1793 | ||
1794 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | |
1795 | return -ENOMEM; | |
1796 | ||
1797 | index = get_netdev_queue_index(queue); | |
1798 | ||
1799 | err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); | |
1800 | if (err) { | |
1801 | free_cpumask_var(mask); | |
1802 | return err; | |
1803 | } | |
1804 | ||
b0b6fcfa AT |
1805 | err = sysfs_rtnl_lock(kobj, attr, dev); |
1806 | if (err) { | |
1ad58225 | 1807 | free_cpumask_var(mask); |
b0b6fcfa | 1808 | return err; |
1ad58225 AT |
1809 | } |
1810 | ||
537c00de | 1811 | err = netif_set_xps_queue(dev, mask, index); |
1ad58225 | 1812 | rtnl_unlock(); |
1d24eb48 TH |
1813 | |
1814 | free_cpumask_var(mask); | |
1d24eb48 | 1815 | |
537c00de | 1816 | return err ? : len; |
1d24eb48 TH |
1817 | } |
1818 | ||
2b9c7581 | 1819 | static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init |
1820 | = __ATTR_RW(xps_cpus); | |
8af2c06f | 1821 | |
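/* Illustrative userspace sketch (not part of net-sysfs.c): pinning the
 * queue's transmit processing to CPUs 0 and 1 by writing a hex CPU
 * bitmap, which xps_cpus_store() feeds to bitmap_parse() and
 * netif_set_xps_queue(). Needs CAP_NET_ADMIN; "eth0" and queue index 0
 * are assumptions.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_cpus", "w");

	if (!f)
		return 1;
	fprintf(f, "3\n");	/* bits 0 and 1 -> CPUs 0 and 1 */
	return fclose(f) ? 1 : 0;
}
#endif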
b0b6fcfa AT |
1822 | static ssize_t xps_rxqs_show(struct kobject *kobj, struct attribute *attr, |
1823 | struct netdev_queue *queue, char *buf) | |
8af2c06f AN |
1824 | { |
1825 | struct net_device *dev = queue->dev; | |
2db6cdae | 1826 | unsigned int index; |
b0b6fcfa | 1827 | int tc, ret; |
8af2c06f AN |
1828 | |
1829 | index = get_netdev_queue_index(queue); | |
1830 | ||
b0b6fcfa AT |
1831 | ret = sysfs_rtnl_lock(kobj, attr, dev); |
1832 | if (ret) | |
1833 | return ret; | |
4ae2bb81 | 1834 | |
255c04a8 | 1835 | tc = netdev_txq_to_tc(dev, index); |
b0b6fcfa AT |
1836 | |
1837 | /* Increase the net device refcnt to make sure it won't be freed while | |
1838 | * xps_queue_show is running. | |
1839 | */ | |
1840 | dev_hold(dev); | |
d7be87a6 | 1841 | rtnl_unlock(); |
255c04a8 | 1842 | |
b0b6fcfa AT |
1843 | ret = tc >= 0 ? xps_queue_show(dev, index, tc, buf, XPS_RXQS) : -EINVAL; |
1844 | dev_put(dev); | |
1845 | return ret; | |
8af2c06f AN |
1846 | } |
1847 | ||
b0b6fcfa AT |
1848 | static ssize_t xps_rxqs_store(struct kobject *kobj, struct attribute *attr, |
1849 | struct netdev_queue *queue, const char *buf, | |
8af2c06f AN |
1850 | size_t len) |
1851 | { | |
1852 | struct net_device *dev = queue->dev; | |
1853 | struct net *net = dev_net(dev); | |
d9a063d2 AT |
1854 | unsigned long *mask; |
1855 | unsigned int index; | |
8af2c06f AN |
1856 | int err; |
1857 | ||
1858 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) | |
1859 | return -EPERM; | |
1860 | ||
29ca1c5a | 1861 | mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL); |
8af2c06f AN |
1862 | if (!mask) |
1863 | return -ENOMEM; | |
1864 | ||
1865 | index = get_netdev_queue_index(queue); | |
1866 | ||
1867 | err = bitmap_parse(buf, len, mask, dev->num_rx_queues); | |
1868 | if (err) { | |
29ca1c5a | 1869 | bitmap_free(mask); |
8af2c06f AN |
1870 | return err; |
1871 | } | |
1872 | ||
b0b6fcfa AT |
1873 | err = sysfs_rtnl_lock(kobj, attr, dev); |
1874 | if (err) { | |
2d57b4f1 | 1875 | bitmap_free(mask); |
b0b6fcfa | 1876 | return err; |
2d57b4f1 AT |
1877 | } |
1878 | ||
4d99f660 | 1879 | cpus_read_lock(); |
044ab86d | 1880 | err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS); |
4d99f660 AV |
1881 | cpus_read_unlock(); |
1882 | ||
2d57b4f1 AT |
1883 | rtnl_unlock(); |
1884 | ||
29ca1c5a | 1885 | bitmap_free(mask); |
8af2c06f AN |
1886 | return err ? : len; |
1887 | } | |
1888 | ||
1889 | static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init | |
1890 | = __ATTR_RW(xps_rxqs); | |
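/* Illustrative userspace sketch (not part of net-sysfs.c): the rx-queue
 * variant of the above; the written hex bitmap selects rx queue indices
 * rather than CPUs, and requires CAP_NET_ADMIN in the device's user
 * namespace. "eth0" and queue index 0 are assumptions.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/queues/tx-0/xps_rxqs", "w");

	if (!f)
		return 1;
	fprintf(f, "f\n");	/* bits 0-3 -> rx queues 0-3 */
	return fclose(f) ? 1 : 0;
}
#endif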
ccf5ff69 | 1891 | #endif /* CONFIG_XPS */ |
1d24eb48 | 1892 | |
2b9c7581 | 1893 | static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { |
ccf5ff69 | 1894 | &queue_trans_timeout.attr, |
8d059b0f | 1895 | &queue_traffic_class.attr, |
ccf5ff69 | 1896 | #ifdef CONFIG_XPS |
1d24eb48 | 1897 | &xps_cpus_attribute.attr, |
8af2c06f | 1898 | &xps_rxqs_attribute.attr, |
822b3b2e | 1899 | &queue_tx_maxrate.attr, |
ccf5ff69 | 1900 | #endif |
1d24eb48 TH |
1901 | NULL |
1902 | }; | |
be0d6926 | 1903 | ATTRIBUTE_GROUPS(netdev_queue_default); |
1d24eb48 TH |
1904 | |
1905 | static void netdev_queue_release(struct kobject *kobj) | |
1906 | { | |
1907 | struct netdev_queue *queue = to_netdev_queue(kobj); | |
1d24eb48 | 1908 | |
1d24eb48 | 1909 | memset(kobj, 0, sizeof(*kobj)); |
d62607c3 | 1910 | netdev_put(queue->dev, &queue->dev_tracker); |
1d24eb48 TH |
1911 | } |
1912 | ||
02a476d9 | 1913 | static const void *netdev_queue_namespace(const struct kobject *kobj) |
82ef3d5d WC |
1914 | { |
1915 | struct netdev_queue *queue = to_netdev_queue(kobj); | |
1916 | struct device *dev = &queue->dev->dev; | |
1917 | const void *ns = NULL; | |
1918 | ||
8f088541 | 1919 | if (dev->class && dev->class->namespace) |
82ef3d5d WC |
1920 | ns = dev->class->namespace(dev); |
1921 | ||
1922 | return ns; | |
1923 | } | |
1924 | ||
02a476d9 | 1925 | static void netdev_queue_get_ownership(const struct kobject *kobj, |
b0e37c0d DT |
1926 | kuid_t *uid, kgid_t *gid) |
1927 | { | |
1928 | const struct net *net = netdev_queue_namespace(kobj); | |
1929 | ||
1930 | net_ns_get_ownership(net, uid, gid); | |
1931 | } | |
1932 | ||
b2793517 | 1933 | static const struct kobj_type netdev_queue_ktype = { |
1d24eb48 TH |
1934 | .sysfs_ops = &netdev_queue_sysfs_ops, |
1935 | .release = netdev_queue_release, | |
82ef3d5d | 1936 | .namespace = netdev_queue_namespace, |
b0e37c0d | 1937 | .get_ownership = netdev_queue_get_ownership, |
1d24eb48 TH |
1938 | }; |
1939 | ||
74293ea1 BL |
1940 | static bool netdev_uses_bql(const struct net_device *dev) |
1941 | { | |
00d066a4 | 1942 | if (dev->lltx || (dev->priv_flags & IFF_NO_QUEUE)) |
74293ea1 BL |
1943 | return false; |
1944 | ||
1945 | return IS_ENABLED(CONFIG_BQL); | |
1946 | } | |
1947 | ||
6b53dafe | 1948 | static int netdev_queue_add_kobject(struct net_device *dev, int index) |
1d24eb48 | 1949 | { |
6b53dafe | 1950 | struct netdev_queue *queue = dev->_tx + index; |
1d24eb48 TH |
1951 | struct kobject *kobj = &queue->kobj; |
1952 | int error = 0; | |
1953 | ||
7e54f85c AT |
1954 | /* Tx queues are cleared in netdev_queue_release to allow later |
1955 | * re-registration. This is triggered when their kobj refcount is | |
1956 | * dropped. | |
1957 | * | |
1958 | * If a queue is removed while both a read (or write) operation and |
1959 | * the re-addition of the same queue are pending (waiting on rtnl_lock), |
1960 | * the re-addition might execute before the read, so the initial |
1961 | * removal never happens (the queue's kobj refcount won't drop enough |
1962 | * because of the pending read). In such a rare case, return to allow |
1963 | * the removal operation to complete. |
1964 | */ | |
1965 | if (unlikely(kobj->state_initialized)) { | |
1966 | netdev_warn_once(dev, "Cannot re-add tx queues before their removal completed"); | |
1967 | return -EAGAIN; | |
1968 | } | |
1969 | ||
e0b60903 JH |
1970 | /* A later kobject_put will trigger the netdev_queue_release call, |
1971 | * which decreases the dev refcount: take that reference here. |
1972 | */ |
d62607c3 | 1973 | netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL); |
e0b60903 | 1974 | |
6b53dafe | 1975 | kobj->kset = dev->queues_kset; |
1d24eb48 | 1976 | error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, |
6648c65e | 1977 | "tx-%u", index); |
114cf580 | 1978 | if (error) |
b8eb7183 | 1979 | goto err; |
114cf580 | 1980 | |
b7ecc1de AT |
1981 | queue->groups = netdev_queue_default_groups; |
1982 | error = sysfs_create_groups(kobj, queue->groups); | |
1983 | if (error) | |
1984 | goto err; | |
1985 | ||
74293ea1 BL |
1986 | if (netdev_uses_bql(dev)) { |
1987 | error = sysfs_create_group(kobj, &dql_group); | |
1988 | if (error) | |
b7ecc1de | 1989 | goto err_default_groups; |
74293ea1 | 1990 | } |
1d24eb48 TH |
1991 | |
1992 | kobject_uevent(kobj, KOBJ_ADD); | |
48a322b6 | 1993 | return 0; |
1d24eb48 | 1994 | |
b7ecc1de AT |
1995 | err_default_groups: |
1996 | sysfs_remove_groups(kobj, queue->groups); | |
b8eb7183 JH |
1997 | err: |
1998 | kobject_put(kobj); | |
1999 | return error; | |
1d24eb48 | 2000 | } |
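/* For orientation (paths assume an interface named "eth0"): a successful
 * netdev_queue_add_kobject() for queue 3 yields roughly:
 *
 *   /sys/class/net/eth0/queues/tx-3/tx_timeout
 *   /sys/class/net/eth0/queues/tx-3/traffic_class
 *   /sys/class/net/eth0/queues/tx-3/xps_cpus            (CONFIG_XPS)
 *   /sys/class/net/eth0/queues/tx-3/xps_rxqs            (CONFIG_XPS)
 *   /sys/class/net/eth0/queues/tx-3/tx_maxrate          (CONFIG_XPS)
 *   /sys/class/net/eth0/queues/tx-3/byte_queue_limits/  (BQL-capable devs)
 */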
d755407d CB |
2001 | |
2002 | static int tx_queue_change_owner(struct net_device *ndev, int index, | |
2003 | kuid_t kuid, kgid_t kgid) | |
2004 | { | |
2005 | struct netdev_queue *queue = ndev->_tx + index; | |
2006 | struct kobject *kobj = &queue->kobj; | |
2007 | int error; | |
2008 | ||
2009 | error = sysfs_change_owner(kobj, kuid, kgid); | |
2010 | if (error) | |
2011 | return error; | |
2012 | ||
74293ea1 BL |
2013 | if (netdev_uses_bql(ndev)) |
2014 | error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid); | |
2015 | ||
d755407d CB |
2016 | return error; |
2017 | } | |
ccf5ff69 | 2018 | #endif /* CONFIG_SYSFS */ |
1d24eb48 TH |
2019 | |
2020 | int | |
6b53dafe | 2021 | netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) |
1d24eb48 | 2022 | { |
ccf5ff69 | 2023 | #ifdef CONFIG_SYSFS |
1d24eb48 TH |
2024 | int i; |
2025 | int error = 0; | |
2026 | ||
5f1c802c AT |
2027 | /* Tx queue kobjects are allowed to be updated when a device is being |
2028 | * unregistered, but solely to remove queues from qdiscs. Any path | |
2029 | * adding queues should be fixed. | |
2030 | */ | |
2031 | WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num, | |
2032 | "New queues can't be registered after device unregistration."); | |
2033 | ||
1d24eb48 | 2034 | for (i = old_num; i < new_num; i++) { |
6b53dafe | 2035 | error = netdev_queue_add_kobject(dev, i); |
1d24eb48 TH |
2036 | if (error) { |
2037 | new_num = old_num; | |
2038 | break; | |
2039 | } | |
2040 | } | |
2041 | ||
114cf580 | 2042 | while (--i >= new_num) { |
6b53dafe | 2043 | struct netdev_queue *queue = dev->_tx + i; |
114cf580 | 2044 | |
8b8f3e66 | 2045 | if (!refcount_read(&dev_net(dev)->ns.count)) |
002d8a1a | 2046 | queue->kobj.uevent_suppress = 1; |
74293ea1 BL |
2047 | |
2048 | if (netdev_uses_bql(dev)) | |
2049 | sysfs_remove_group(&queue->kobj, &dql_group); | |
2050 | ||
b7ecc1de | 2051 | sysfs_remove_groups(&queue->kobj, queue->groups); |
114cf580 TH |
2052 | kobject_put(&queue->kobj); |
2053 | } | |
1d24eb48 TH |
2054 | |
2055 | return error; | |
bf264145 TH |
2056 | #else |
2057 | return 0; | |
ccf5ff69 | 2058 | #endif /* CONFIG_SYSFS */ |
1d24eb48 TH |
2059 | } |
2060 | ||
d755407d CB |
2061 | static int net_tx_queue_change_owner(struct net_device *dev, int num, |
2062 | kuid_t kuid, kgid_t kgid) | |
2063 | { | |
2064 | #ifdef CONFIG_SYSFS | |
2065 | int error = 0; | |
2066 | int i; | |
2067 | ||
2068 | for (i = 0; i < num; i++) { | |
2069 | error = tx_queue_change_owner(dev, i, kuid, kgid); | |
2070 | if (error) | |
2071 | break; | |
2072 | } | |
2073 | ||
2074 | return error; | |
2075 | #else | |
2076 | return 0; | |
2077 | #endif /* CONFIG_SYSFS */ | |
2078 | } | |
2079 | ||
6b53dafe | 2080 | static int register_queue_kobjects(struct net_device *dev) |
1d24eb48 | 2081 | { |
bf264145 | 2082 | int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; |
1d24eb48 | 2083 | |
ccf5ff69 | 2084 | #ifdef CONFIG_SYSFS |
6b53dafe | 2085 | dev->queues_kset = kset_create_and_add("queues", |
6648c65e | 2086 | NULL, &dev->dev.kobj); |
6b53dafe | 2087 | if (!dev->queues_kset) |
62fe0b40 | 2088 | return -ENOMEM; |
6b53dafe | 2089 | real_rx = dev->real_num_rx_queues; |
bf264145 | 2090 | #endif |
6b53dafe | 2091 | real_tx = dev->real_num_tx_queues; |
1d24eb48 | 2092 | |
6b53dafe | 2093 | error = net_rx_queue_update_kobjects(dev, 0, real_rx); |
1d24eb48 TH |
2094 | if (error) |
2095 | goto error; | |
bf264145 | 2096 | rxq = real_rx; |
1d24eb48 | 2097 | |
6b53dafe | 2098 | error = netdev_queue_update_kobjects(dev, 0, real_tx); |
1d24eb48 TH |
2099 | if (error) |
2100 | goto error; | |
bf264145 | 2101 | txq = real_tx; |
1d24eb48 TH |
2102 | |
2103 | return 0; | |
2104 | ||
2105 | error: | |
6b53dafe WC |
2106 | netdev_queue_update_kobjects(dev, txq, 0); |
2107 | net_rx_queue_update_kobjects(dev, rxq, 0); | |
895a5e96 Y |
2108 | #ifdef CONFIG_SYSFS |
2109 | kset_unregister(dev->queues_kset); | |
2110 | #endif | |
1d24eb48 | 2111 | return error; |
62fe0b40 | 2112 | } |
0a9627f2 | 2113 | |
d755407d CB |
2114 | static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid) |
2115 | { | |
2116 | int error = 0, real_rx = 0, real_tx = 0; | |
2117 | ||
2118 | #ifdef CONFIG_SYSFS | |
2119 | if (ndev->queues_kset) { | |
2120 | error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid); | |
2121 | if (error) | |
2122 | return error; | |
2123 | } | |
2124 | real_rx = ndev->real_num_rx_queues; | |
2125 | #endif | |
2126 | real_tx = ndev->real_num_tx_queues; | |
2127 | ||
2128 | error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid); | |
2129 | if (error) | |
2130 | return error; | |
2131 | ||
2132 | error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid); | |
2133 | if (error) | |
2134 | return error; | |
2135 | ||
2136 | return 0; | |
2137 | } | |
2138 | ||
6b53dafe | 2139 | static void remove_queue_kobjects(struct net_device *dev) |
62fe0b40 | 2140 | { |
bf264145 TH |
2141 | int real_rx = 0, real_tx = 0; |
2142 | ||
a953be53 | 2143 | #ifdef CONFIG_SYSFS |
6b53dafe | 2144 | real_rx = dev->real_num_rx_queues; |
bf264145 | 2145 | #endif |
6b53dafe | 2146 | real_tx = dev->real_num_tx_queues; |
bf264145 | 2147 | |
6b53dafe WC |
2148 | net_rx_queue_update_kobjects(dev, real_rx, 0); |
2149 | netdev_queue_update_kobjects(dev, real_tx, 0); | |
d7dac083 | 2150 | |
0a65dcf6 | 2151 | netdev_lock_ops(dev); |
d7dac083 AT |
2152 | dev->real_num_rx_queues = 0; |
2153 | dev->real_num_tx_queues = 0; | |
0a65dcf6 | 2154 | netdev_unlock_ops(dev); |
ccf5ff69 | 2155 | #ifdef CONFIG_SYSFS |
6b53dafe | 2156 | kset_unregister(dev->queues_kset); |
bf264145 | 2157 | #endif |
0a9627f2 | 2158 | } |
608b4b95 | 2159 | |
7dc5dbc8 EB |
2160 | static bool net_current_may_mount(void) |
2161 | { | |
2162 | struct net *net = current->nsproxy->net_ns; | |
2163 | ||
2164 | return ns_capable(net->user_ns, CAP_SYS_ADMIN); | |
2165 | } | |
2166 | ||
a685e089 | 2167 | static void *net_grab_current_ns(void) |
608b4b95 | 2168 | { |
a685e089 AV |
2169 | struct net *ns = current->nsproxy->net_ns; |
2170 | #ifdef CONFIG_NET_NS | |
2171 | if (ns) | |
c122e14d | 2172 | refcount_inc(&ns->passive); |
a685e089 AV |
2173 | #endif |
2174 | return ns; | |
608b4b95 EB |
2175 | } |
2176 | ||
2177 | static const void *net_initial_ns(void) | |
2178 | { | |
2179 | return &init_net; | |
2180 | } | |
2181 | ||
2182 | static const void *net_netlink_ns(struct sock *sk) | |
2183 | { | |
2184 | return sock_net(sk); | |
2185 | } | |
2186 | ||
737aec57 | 2187 | const struct kobj_ns_type_operations net_ns_type_operations = { |
608b4b95 | 2188 | .type = KOBJ_NS_TYPE_NET, |
7dc5dbc8 | 2189 | .current_may_mount = net_current_may_mount, |
a685e089 | 2190 | .grab_current_ns = net_grab_current_ns, |
608b4b95 EB |
2191 | .netlink_ns = net_netlink_ns, |
2192 | .initial_ns = net_initial_ns, | |
a685e089 | 2193 | .drop_ns = net_drop_ns, |
608b4b95 | 2194 | }; |
04600794 | 2195 | EXPORT_SYMBOL_GPL(net_ns_type_operations); |
608b4b95 | 2196 | |
23680f0b | 2197 | static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env) |
1da177e4 | 2198 | { |
23680f0b | 2199 | const struct net_device *dev = to_net_dev(d); |
7eff2e7a | 2200 | int retval; |
1da177e4 | 2201 | |
312c004d | 2202 | /* pass interface to uevent. */ |
7eff2e7a | 2203 | retval = add_uevent_var(env, "INTERFACE=%s", dev->name); |
bf62456e ER |
2204 | if (retval) |
2205 | goto exit; | |
ca2f37db JT |
2206 | |
2207 | /* pass ifindex to uevent. | |
2208 | * ifindex is useful as it won't change (interface name may change) | |
6648c65e | 2209 | * and is what rtnetlink uses natively. |
2210 | */ | |
7eff2e7a | 2211 | retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); |
1da177e4 | 2212 | |
bf62456e | 2213 | exit: |
bf62456e | 2214 | return retval; |
1da177e4 | 2215 | } |
1da177e4 LT |
2216 | |
2217 | /* | |
4ec93edb | 2218 | * netdev_release -- destroy and free a dead device. |
43cb76d9 | 2219 | * Called when last reference to device kobject is gone. |
1da177e4 | 2220 | */ |
43cb76d9 | 2221 | static void netdev_release(struct device *d) |
1da177e4 | 2222 | { |
43cb76d9 | 2223 | struct net_device *dev = to_net_dev(d); |
1da177e4 LT |
2224 | |
2225 | BUG_ON(dev->reg_state != NETREG_RELEASED); | |
2226 | ||
6c557001 FW |
2227 | /* no need to wait for rcu grace period: |
2228 | * device is dead and about to be freed. | |
2229 | */ | |
2230 | kfree(rcu_access_pointer(dev->ifalias)); | |
13cabc47 | 2231 | kvfree(dev); |
1da177e4 LT |
2232 | } |
2233 | ||
fa627348 | 2234 | static const void *net_namespace(const struct device *d) |
608b4b95 | 2235 | { |
fa627348 | 2236 | const struct net_device *dev = to_net_dev(d); |
5c29482d | 2237 | |
608b4b95 EB |
2238 | return dev_net(dev); |
2239 | } | |
2240 | ||
fa627348 | 2241 | static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid) |
b0e37c0d | 2242 | { |
fa627348 | 2243 | const struct net_device *dev = to_net_dev(d); |
b0e37c0d DT |
2244 | const struct net *net = dev_net(dev); |
2245 | ||
2246 | net_ns_get_ownership(net, uid, gid); | |
2247 | } | |
2248 | ||
9382b4f3 | 2249 | static const struct class net_class = { |
1da177e4 | 2250 | .name = "net", |
43cb76d9 | 2251 | .dev_release = netdev_release, |
6be8aeef | 2252 | .dev_groups = net_class_groups, |
43cb76d9 | 2253 | .dev_uevent = netdev_uevent, |
608b4b95 EB |
2254 | .ns_type = &net_ns_type_operations, |
2255 | .namespace = net_namespace, | |
b0e37c0d | 2256 | .get_ownership = net_get_ownership, |
1da177e4 LT |
2257 | }; |
2258 | ||
e330fb14 | 2259 | #ifdef CONFIG_OF |
aa836df9 FF |
2260 | static int of_dev_node_match(struct device *dev, const void *data) |
2261 | { | |
2e186a2c TW |
2262 | for (; dev; dev = dev->parent) { |
2263 | if (dev->of_node == data) | |
2264 | return 1; | |
2265 | } | |
aa836df9 | 2266 | |
2e186a2c | 2267 | return 0; |
aa836df9 FF |
2268 | } |
2269 | ||
9861f720 RK |
2270 | /* |
2271 | * of_find_net_device_by_node - lookup the net device for the device node | |
2272 | * @np: OF device node | |
2273 | * | |
2274 | * Looks up the net_device structure corresponding to the device node. |
2275 | * If successful, returns a pointer to the net_device with the embedded | |
2276 | * struct device refcount incremented by one, or NULL on failure. The | |
2277 | * refcount must be dropped when done with the net_device. | |
2278 | */ | |
aa836df9 FF |
2279 | struct net_device *of_find_net_device_by_node(struct device_node *np) |
2280 | { | |
2281 | struct device *dev; | |
2282 | ||
2283 | dev = class_find_device(&net_class, NULL, np, of_dev_node_match); | |
2284 | if (!dev) | |
2285 | return NULL; | |
2286 | ||
2287 | return to_net_dev(dev); | |
2288 | } | |
2289 | EXPORT_SYMBOL(of_find_net_device_by_node); | |
2290 | #endif | |
2291 | ||
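/* Illustrative caller sketch (not part of net-sysfs.c): consuming
 * of_find_net_device_by_node() and dropping the embedded struct device
 * reference, as the kernel-doc above requires. The helper name and the
 * source of @np are assumptions.
 */
#if 0
static void example_use(struct device_node *np)
{
	struct net_device *ndev = of_find_net_device_by_node(np);

	if (!ndev)
		return;

	netdev_info(ndev, "matches OF node %pOF\n", np);
	put_device(&ndev->dev);	/* drop the ref class_find_device() took */
}
#endif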
9093bbb2 SH |
2292 | /* Delete sysfs entries but hold kobject reference until after all |
2293 | * netdev references are gone. | |
2294 | */ | |
6b53dafe | 2295 | void netdev_unregister_kobject(struct net_device *ndev) |
1da177e4 | 2296 | { |
6648c65e | 2297 | struct device *dev = &ndev->dev; |
9093bbb2 | 2298 | |
8b8f3e66 | 2299 | if (!refcount_read(&dev_net(ndev)->ns.count)) |
002d8a1a AV |
2300 | dev_set_uevent_suppress(dev, 1); |
2301 | ||
9093bbb2 | 2302 | kobject_get(&dev->kobj); |
3891845e | 2303 | |
6b53dafe | 2304 | remove_queue_kobjects(ndev); |
0a9627f2 | 2305 | |
9802c8e2 ML |
2306 | pm_runtime_set_memalloc_noio(dev, false); |
2307 | ||
9093bbb2 | 2308 | device_del(dev); |
1da177e4 LT |
2309 | } |
2310 | ||
2311 | /* Create sysfs entries for network device. */ | |
6b53dafe | 2312 | int netdev_register_kobject(struct net_device *ndev) |
1da177e4 | 2313 | { |
6648c65e | 2314 | struct device *dev = &ndev->dev; |
6b53dafe | 2315 | const struct attribute_group **groups = ndev->sysfs_groups; |
0a9627f2 | 2316 | int error = 0; |
1da177e4 | 2317 | |
a1b3f594 | 2318 | device_initialize(dev); |
43cb76d9 | 2319 | dev->class = &net_class; |
6b53dafe | 2320 | dev->platform_data = ndev; |
43cb76d9 | 2321 | dev->groups = groups; |
1da177e4 | 2322 | |
6b53dafe | 2323 | dev_set_name(dev, "%s", ndev->name); |
1da177e4 | 2324 | |
8b41d188 | 2325 | #ifdef CONFIG_SYSFS |
0c509a6c EB |
2326 | /* Allow for a device specific group */ |
2327 | if (*groups) | |
2328 | groups++; | |
1da177e4 | 2329 | |
0c509a6c | 2330 | *groups++ = &netstat_group; |
38c1a01c | 2331 | |
c304eddc | 2332 | if (wireless_group_needed(ndev)) |
38c1a01c | 2333 | *groups++ = &wireless_group; |
8b41d188 | 2334 | #endif /* CONFIG_SYSFS */ |
1da177e4 | 2335 | |
0a9627f2 TH |
2336 | error = device_add(dev); |
2337 | if (error) | |
8ed633b9 | 2338 | return error; |
0a9627f2 | 2339 | |
6b53dafe | 2340 | error = register_queue_kobjects(ndev); |
8ed633b9 WH |
2341 | if (error) { |
2342 | device_del(dev); | |
2343 | return error; | |
2344 | } | |
0a9627f2 | 2345 | |
9802c8e2 ML |
2346 | pm_runtime_set_memalloc_noio(dev, true); |
2347 | ||
0a9627f2 | 2348 | return error; |
1da177e4 LT |
2349 | } |
2350 | ||
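/* For orientation: once netdev_register_kobject() succeeds, the device is
 * visible as /sys/class/net/<ifname> with the statistics group (and the
 * wireless group where applicable), plus the queues/ kset populated by
 * register_queue_kobjects() above.
 */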
e6dee9f3 CB |
2351 | /* Change owner for sysfs entries when moving network devices across network |
2352 | * namespaces owned by different user namespaces. | |
2353 | */ | |
2354 | int netdev_change_owner(struct net_device *ndev, const struct net *net_old, | |
2355 | const struct net *net_new) | |
2356 | { | |
f7a1e76d XL |
2357 | kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID; |
2358 | kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID; | |
e6dee9f3 | 2359 | struct device *dev = &ndev->dev; |
e6dee9f3 CB |
2360 | int error; |
2361 | ||
2362 | net_ns_get_ownership(net_old, &old_uid, &old_gid); | |
2363 | net_ns_get_ownership(net_new, &new_uid, &new_gid); | |
2364 | ||
2365 | /* The network namespace was changed but the owning user namespace is | |
2366 | * identical so there's no need to change the owner of sysfs entries. | |
2367 | */ | |
2368 | if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid)) | |
2369 | return 0; | |
2370 | ||
2371 | error = device_change_owner(dev, new_uid, new_gid); | |
2372 | if (error) | |
2373 | return error; | |
2374 | ||
d755407d CB |
2375 | error = queue_change_owner(ndev, new_uid, new_gid); |
2376 | if (error) | |
2377 | return error; | |
2378 | ||
e6dee9f3 CB |
2379 | return 0; |
2380 | } | |
2381 | ||
b793dc5c | 2382 | int netdev_class_create_file_ns(const struct class_attribute *class_attr, |
58292cbe | 2383 | const void *ns) |
b8a9787e | 2384 | { |
58292cbe | 2385 | return class_create_file_ns(&net_class, class_attr, ns); |
b8a9787e | 2386 | } |
58292cbe | 2387 | EXPORT_SYMBOL(netdev_class_create_file_ns); |
b8a9787e | 2388 | |
b793dc5c | 2389 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
58292cbe | 2390 | const void *ns) |
b8a9787e | 2391 | { |
58292cbe | 2392 | class_remove_file_ns(&net_class, class_attr, ns); |
b8a9787e | 2393 | } |
58292cbe | 2394 | EXPORT_SYMBOL(netdev_class_remove_file_ns); |
b8a9787e | 2395 | |
a48d4bb0 | 2396 | int __init netdev_kobject_init(void) |
1da177e4 | 2397 | { |
608b4b95 | 2398 | kobj_ns_type_register(&net_ns_type_operations); |
1da177e4 LT |
2399 | return class_register(&net_class); |
2400 | } |